| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/python3
import argparse
import code
import readline
import signal
import sys
from parse import Argparser, premain, SigHandler_SIGINT, PythonInterpreter
from utils import ParseFlags
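# getWASMModule: parse the WASM module whose path is passed as the first command-line argument.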
def getWASMModule():
module_path = sys.argv[1]
interpreter = PythonInterpreter()
module = interpreter.parse(module_path)
return module
def main():
signal.signal(signal.SIGINT, SigHandler_SIGINT)
argparser = Argparser()
if argparser.args.dbg:
try:
premain(argparser)
except Exception as e:
print(e.__doc__)
# e.message is not guaranteed to exist (removed in Python 3), so guard the access
if getattr(e, "message", None): print(e.message)
variables = globals().copy()
variables.update(locals())
shell = code.InteractiveConsole(variables)
shell.interact(banner="DEVIWASM REPL")
else:
premain(argparser)
if __name__ == "__main__":
main()
| bloodstalker/mutator | bruiser/wasm/dwasm.py | Python | gpl-3.0 | 855 |
import unittest
import decodes.core as dc
from decodes.core import *
from decodes.extensions.voxel import *
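# Exercises VoxelField value get/set and the per-voxel dimensions/centre points implied by its bounds.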
class Tests(unittest.TestCase):
def test_constructor(self):
bounds = Bounds(center=Point(),dim_x=8,dim_y=8,dim_z=8)
vf = VoxelField(bounds,4,4,4)
vf.set(0,0,0,10.0)
vf.set(3,3,3,10.0)
self.assertEqual(vf.get(0,0,0),10.0)
self.assertEqual(vf.get(3,3,3),10.0)
self.assertEqual(vf.get(2,2,2),0.0)
def test_bounds_and_cpt(self):
bounds = Bounds(center=Point(),dim_x=8,dim_y=8,dim_z=8)
vf = VoxelField(bounds,4,4,4)
self.assertEqual(vf.dim_pixel,Vec(2,2,2))
self.assertEqual(vf.cpt_at(0,0,0),Point(-3,-3,-3))
vf.bounds = Bounds(center=Point(),dim_x=12,dim_y=12,dim_z=8)
self.assertEqual(vf.dim_pixel,Vec(3,3,2))
self.assertEqual(vf.cpt_at(0,0,0),Point(-4.5,-4.5,-3))
| ksteinfe/decodes | tests/test_voxel.py | Python | gpl-3.0 | 906 |
from __future__ import print_function
import os
from ftplib import FTP
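# Upload a local file to the already-connected FTP server in binary mode.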
def place_file(ftp, filename):
with open(filename, 'rb') as fh:
    ftp.storbinary('STOR ' + filename, fh)
if __name__ == '__main__':
url = 'ftp.k-bits.com'
ftp = FTP(url)
user = 'usuario1@k-bits.com'
passw = 'happy1234'
ftp.login(user, passw)
remoto = []
ftp.dir(remoto.append)
for r in remoto:
print(r)
directorio_local = os.listdir('.')
place_file(ftp, 'Banamex.csv')
ftp.quit()
| chrisRubiano/TAP | dolar/ftp.py | Python | gpl-3.0 | 494 |
from vsg.rules import token_case
from vsg import token
lTokens = []
lTokens.append(token.package_body.body_keyword)
class rule_501(token_case):
'''
This rule checks the **body** keyword has proper case.
|configuring_uppercase_and_lowercase_rules_link|
**Violation**
.. code-block:: vhdl
package BODY FIFO_PKG is
**Fix**
.. code-block:: vhdl
package body FIFO_PKG is
'''
def __init__(self):
token_case.__init__(self, 'package_body', '501', lTokens)
self.groups.append('case::keyword')
| jeremiah-c-leary/vhdl-style-guide | vsg/rules/package_body/rule_501.py | Python | gpl-3.0 | 563 |
import xml.etree.ElementTree as ElementTree
from model.dynamic.api import api
from model.dynamic.skills.skill_queue_item import SkillQueueItem
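# Fetch a character's skill queue from the cached EVE API XML and load each row into a SkillQueueItem, ordered by queuePosition.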
class SkillQueue(object):
def __init__(self, user_id, api_key, character_id):
api.fetch("char", "SkillQueue", user_id, api_key, character_id)
tree = ElementTree.parse("%s/SkillQueue.xml.aspx" % \
api.build_path("char", user_id, character_id))
root = tree.getroot()
rowset = root.find("result").find("rowset")
self.skill_queue = list()
if rowset.getchildren():
for element in rowset:
self.skill_queue.insert\
(int(element.get("queuePosition")),
SkillQueueItem(int(element.get("typeID")),
int(element.get("level")),
int(element.get("startSP")),
int(element.get("endSP")),
element.get("startTime"),
element.get("endTime")))
| Iconik/eve-suite | src/model/dynamic/skills/skill_queue.py | Python | gpl-3.0 | 1,072 |
#!/usr/bin/env python
# encoding: utf-8
"""Copyright (C) 2013 COLDWELL AG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
try:
from pyv8 import PyV8
except ImportError:
import PyV8
pyv8 = PyV8
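# Evaluate a JavaScript snippet in a fresh V8 context and return the result.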
def execute(code):
if isinstance(code, unicode):
code = code.encode("utf-8")
with PyV8.JSContext() as c:
c.enter()
return c.eval(code)
def _convert(data):
result = {}
for key in data.keys():
if isinstance(data[key], PyV8.JSObject):
result[key] = _convert(data[key])
else:
result[key] = data[key]
return result
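# Parse a JavaScript object/value by letting V8 serialise it with JSON.stringify and decoding with json.loads.
# Example (sketch): loads("{a: 1}") -> {"a": 1}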
def loads(data):
s = execute("JSON.stringify({})".format(data))
return json.loads(s)
| MoroGasper/client | client/javascript.py | Python | gpl-3.0 | 1,277 |
import percolation as P
import rdflib as r
import os
import time
from rdflib import ConjunctiveGraph
TT = time.time()
class PercolationServer:
def __init__(self, percolationdir="~/.percolation/"):
percolationdir = os.path.expanduser(percolationdir)
if not os.path.isdir(percolationdir):
os.mkdir(percolationdir)
dbdir = percolationdir+"sleepydb/"
if not os.path.isdir(dbdir):
os.mkdir(dbdir)
percolation_graph = ConjunctiveGraph(store="Sleepycat")
try:
percolation_graph.open(dbdir, create=False)
except: # get exception type (?)
percolation_graph.open(dbdir, create=True)
P.percolation_graph = percolation_graph
self.percolation_graph = percolation_graph
P.percolation_server = self
endpoint_url_ = os.getenv("PERCOLATION_ENDPOINT")
P.client = None
def start(start_session=True, endpoint_url=endpoint_url_):
"""Startup routine"""
c("endpoint url", endpoint_url)
if endpoint_url:
P.client = P.rdf.sparql.Client(endpoint_url)
else:
P.client = None
PercolationServer()
if start_session:
P.utils.startSession()
# P.utils.aaSession()
def close(): # duplicate in legacy/outlines.py
P.percolation_graph.close()
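# Timing helper: print the arguments prefixed with the seconds elapsed since the previous call; "prompt" pauses for input.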
def check(*args):
global TT
if not P.QUIET:
if args and isinstance(args[0], str) \
and (len(args[0]) == args[0].count("\n")):
print("{}{:.3f}".format(args[0], time.time()-TT), *args[1:])
TT = time.time()
else:
print("{:.3f}".format(time.time()-TT), *args)
TT = time.time()
if args[0] == "prompt":
input("ANY KEY TO CONTINUE")
QUIET = False
c = check
if __name__ == "__main__":
start()
rdflibok = isinstance(P.percolation_graph, r.ConjunctiveGraph)
ntriples = len(P.percolation_graph)
c("rdflib in P.percolation_graph:", rdflibok, "ntriples:", ntriples)
if endpoint_url_:
ntriples = P.client.getNTriples()
ngraphs = P.client.getNGraphs()
c("connected to endpoint:", endpoint_url_, "with {} graphs \
and {} triples".format(ngraphs, ntriples))
else:
c("not connected to any remote endpoint\n\
(relying only on rdflib percolation_graph)")
choice = input("print graphs (y/N)")
if choice == "y":
graphs = P.client.getAllGraphs()
ntriples_ = []
for graph in graphs:
ntriples_ += [P.client.getNTriples(graph)]
c(list(zip(ntriples_, graphs)))
choice = input("print triples (y/N)")
if choice == "y":
c(P.client.getAllTriples())
| ttm/percolation | percolation/bootstrap.py | Python | gpl-3.0 | 2,675 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utilities for running data through the TM, and analyzing the results.
"""
from prettytable import PrettyTable
class TemporalMemoryTestMachine(object):
"""
Base TM test machine class.
"""
def __init__(self, tm):
"""
@param tm (TM) Temporal memory
"""
# Save member variables
self.tm = tm
def feedSequence(self, sequence, learn=True):
"""
Feed a sequence through the TM.
@param sequence (list) List of patterns, with None for resets
@param learn (bool) Learning enabled
@return (list) List of sets containing predictive cells,
one for each element in `sequence`
"""
results = []
for pattern in sequence:
if pattern == None:
self.tm.reset()
else:
self.tm.compute(pattern, learn=learn)
results.append(self.tm.predictiveCells)
return results
def computeDetailedResults(self, results, sequence):
"""
Compute detailed results from results of `feedSequence`.
@param results (list) Results from `feedSequence`
@param sequence (list) Sequence that generated the results
@return (tuple) Contains:
`predictedActiveCellsList` (list),
`predictedInactiveCellsList` (list),
`predictedActiveColumnsList` (list),
`predictedInactiveColumnsList` (list),
`unpredictedActiveColumnsList` (list)
"""
predictedActiveCellsList = [set()]
predictedInactiveCellsList = [set()]
predictedActiveColumnsList = [set()]
predictedInactiveColumnsList = [set()]
unpredictedActiveColumnsList = [set()]
# TODO: Make sure the first row is accurate, not just empty
for i in xrange(1, len(results)):
pattern = sequence[i]
predictedActiveCells = set()
predictedInactiveCells = set()
predictedActiveColumns = set()
predictedInactiveColumns = set()
unpredictedActiveColumns = set()
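# Classify each previously predicted cell/column as correctly or incorrectly predicted for the current pattern.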
if pattern != None:
prevPredictedCells = results[i-1]
for prevPredictedCell in prevPredictedCells:
prevPredictedColumn = self.tm.connections.columnForCell(
prevPredictedCell)
if prevPredictedColumn in pattern:
predictedActiveCells.add(prevPredictedCell)
predictedActiveColumns.add(prevPredictedColumn)
else:
predictedInactiveCells.add(prevPredictedCell)
predictedInactiveColumns.add(prevPredictedColumn)
unpredictedActiveColumns = pattern - predictedActiveColumns
predictedActiveCellsList.append(predictedActiveCells)
predictedInactiveCellsList.append(predictedInactiveCells)
predictedActiveColumnsList.append(predictedActiveColumns)
predictedInactiveColumnsList.append(predictedInactiveColumns)
unpredictedActiveColumnsList.append(unpredictedActiveColumns)
return (predictedActiveCellsList,
predictedInactiveCellsList,
predictedActiveColumnsList,
predictedInactiveColumnsList,
unpredictedActiveColumnsList)
@staticmethod
def prettyPrintDetailedResults(detailedResults,
sequence,
patternMachine,
verbosity=1):
"""
Pretty print the detailed results from `feedSequence`.
@param detailedResults (list) Detailed results from
`computeDetailedResults`
@param sequence (list) Sequence that generated the results
@param patternMachine (PatternMachine) Pattern machine
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
cols = ["Pattern",
"predicted active columns",
"predicted inactive columns",
"unpredicted active columns",
"# predicted active cells",
"# predicted inactive cells"]
if verbosity > 2:
cols += ["predicted active cells",
"predicted inactive cells"]
table = PrettyTable(cols)
(
predictedActiveCellsList,
predictedInactiveCellsList,
predictedActiveColumnsList,
predictedInactiveColumnsList,
unpredictedActiveColumnsList
) = detailedResults
for i in xrange(len(sequence)):
pattern = sequence[i]
if pattern == None:
row = ["<reset>", 0, 0, 0, 0, 0]
if verbosity > 2:
row += [0, 0]
else:
row = []
row.append(patternMachine.prettyPrintPattern(pattern,
verbosity=verbosity))
row.append(
patternMachine.prettyPrintPattern(predictedActiveColumnsList[i],
verbosity=verbosity))
row.append(
patternMachine.prettyPrintPattern(predictedInactiveColumnsList[i],
verbosity=verbosity))
row.append(
patternMachine.prettyPrintPattern(unpredictedActiveColumnsList[i],
verbosity=verbosity))
row.append(len(predictedActiveCellsList[i]))
row.append(len(predictedInactiveCellsList[i]))
if verbosity > 2:
row.append(list(predictedActiveCellsList[i]))
row.append(list(predictedInactiveCellsList[i]))
table.add_row(row)
return table.get_string()
def prettyPrintConnections(self):
"""
Pretty print the connections in the temporal memory.
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
tm = self.tm
text = ""
text += ("Segments: (format => "
"{segment: [(source cell, permanence), ...])\n")
text += "------------------------------------\n"
columns = range(tm.connections.numberOfColumns())
for column in columns:
cells = tm.connections.cellsForColumn(column)
for cell in cells:
segmentDict = dict()
for seg in tm.connections.segmentsForCell(cell):
synapseList = []
for synapse in tm.connections.synapsesForSegment(seg):
(_, sourceCell, permanence) = tm.connections.dataForSynapse(synapse)
synapseList.append([sourceCell,
permanence])
segmentDict[seg] = synapseList
text += ("Column {0} / Cell {1}:\t{2}\n".format(
column, cell, segmentDict))
if column < len(columns) - 1: # not last
text += "\n"
text += "------------------------------------\n"
return text
| syl20bnr/nupic | nupic/test/temporal_memory_test_machine.py | Python | gpl-3.0 | 7,649 |
from callback_event import *
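# Build an odd number by applying the supplied even-number callback to k and adding 1.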
def getOddNumber(k,getEvenNumber): return 1+getEvenNumber(k)
def main():
k=1
i=getOddNumber(k,double);
print(i)
i=getOddNumber(k,quadruple);
print(i)
i=getOddNumber(k,lambda x:x*8)
print(i)
if __name__=="__main__":main()
| AlexYu-beta/CppTemplateProgrammingDemo | Demo1_8/demo1_8.py | Python | gpl-3.0 | 278 |
import os, subprocess, sys  # os is needed for the READTHEDOCS environment check below
def run_doxygen(folder):
"""Run the doxygen make command in the designated folder"""
try:
retcode = subprocess.call("cd %s; make" % folder, shell=True)
if retcode < 0:
sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
except OSError as e:
sys.stderr.write("doxygen execution failed: %s" % e)
def generate_doxygen_xml(app):
"""Run the doxygen make commands if we're on the ReadTheDocs server"""
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
if read_the_docs_build:
run_doxygen("..")
def setup(app):
# Add hook for building doxygen xml when needed
app.connect("builder-inited", generate_doxygen_xml)
| UCLOrengoGroup/cath-tools | docs/conf.py | Python | gpl-3.0 | 743 |
from __future__ import print_function, absolute_import
from PySide2 import QtGui,QtCore,QtWidgets
from math import *
import pickle, os, json
import learnbot_dsl.guis.EditVar as EditVar
from learnbot_dsl.learnbotCode.Block import *
from learnbot_dsl.learnbotCode.Language import getLanguage
from learnbot_dsl.learnbotCode.toQImage import *
from learnbot_dsl.learnbotCode.Parser import parserLearntBotCodeOnlyUserFuntion
from learnbot_dsl.blocksConfig import pathImgBlocks
from learnbot_dsl.learnbotCode import getAprilTextDict
class KeyPressEater(QtCore.QObject):
def eventFilter(self, obj, event):
if isinstance(event, QtGui.QMouseEvent) and event.buttons() & QtCore.Qt.RightButton:
return True
return False
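# Recursively serialise a block-instruction tree into learnbot DSL source text, recording each block's start/end character offsets.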
def toLBotPy(inst, ntab=1, offset=0):
text = inst[0]
if inst[1]["TYPE"] in [USERFUNCTION, LIBRARY]:
text = inst[0] + "()"
else:
inst[1]["VISUALBLOCK"].startOffset = offset
if inst[1]["TYPE"] is CONTROL:
if inst[1]["VARIABLES"] is not None:
text = inst[0] + "("
for var in inst[1]["VARIABLES"]:
text += var + ", "
text = text[0:-2] + ""
text += ")"
if inst[1]["TYPE"] is FUNTION:
text = "function." + inst[0] + "("
if inst[1]["VARIABLES"] is not None:
for var in inst[1]["VARIABLES"]:
text += var + ", "
text = text[0:-2] + ""
text += ")"
elif inst[1]["TYPE"] is VARIABLE:
text = inst[0]
if inst[1]["VARIABLES"] is not None:
text += " = "
for var in inst[1]["VARIABLES"]:
text += var
if inst[1]["RIGHT"] is not None:
text += " "
text += toLBotPy(inst[1]["RIGHT"], ntab, len(text) + offset)
if inst[1]["BOTTOMIN"] is not None:
text += ":\n" + "\t" * ntab
text += toLBotPy(inst[1]["BOTTOMIN"], ntab + 1, len(text) + offset)
if inst[0] in ["while", "while True"]:
text += "\n" + "\t" * (ntab - 1) + "end"
if inst[0] == "else" or (inst[0] in ["if", "elif"] and (inst[1]["BOTTOM"] is None or (
inst[1]["BOTTOM"] is not None and inst[1]["BOTTOM"][0] not in ["elif", "else"]))):
text += "\n" + "\t" * (ntab - 1) + "end"
inst[1]["VISUALBLOCK"].endOffset = len(text)-1 + offset
if inst[1]["BOTTOM"] is not None:
text += "\n" + "\t" * (ntab - 1)
text += toLBotPy(inst[1]["BOTTOM"], ntab, len(text) + offset)
return text
def EuclideanDist(p1, p2):
p = p1 - p2
return sqrt(pow(p.x(), 2) + pow(p.y(), 2))
class VarGui(QtWidgets.QDialog, EditVar.Ui_Dialog):
def init(self):
self.setupUi(self)
def getTable(self):
return self.table
def setSlotToDeleteButton(self, fun):
self.deleteButton.clicked.connect(fun)
self.okButton.clicked.connect(self.close)
class VisualBlock(QtWidgets.QGraphicsPixmapItem, QtWidgets.QWidget):
def __init__(self, parentBlock, parent=None, scene=None):
self.startOffset = None
self.endOffset = None
self._notifications = []
self.parentBlock = parentBlock
self.__typeBlock = self.parentBlock.typeBlock
self.__type = self.parentBlock.type
self.id = self.parentBlock.id
self.connections = self.parentBlock.connections
self.highlighted = False
for c in self.connections:
c.setParent(self.parentBlock)
self.dicTrans = parentBlock.dicTrans
self.shouldUpdate = True
if len(self.dicTrans) is 0:
self.showtext = self.parentBlock.name
else:
self.showtext = self.dicTrans[getLanguage()]
QtWidgets.QGraphicsPixmapItem.__init__(self)
QtWidgets.QWidget.__init__(self)
def foo(x):
return 32
# Load Image of block
im = cv2.imread(self.parentBlock.file, cv2.IMREAD_UNCHANGED)
r, g, b, a = cv2.split(im)
rgb = cv2.merge((r, g, b))
hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)
h, s, v = cv2.split(hsv)
h = h + self.parentBlock.hue
s = s + 160
hsv = cv2.merge((h, s, v))
im = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
r, g, b = cv2.split(im)
self.cvImg = cv2.merge((r, g, b, a))
self.cvImg = np.require(self.cvImg, np.uint8, 'C')
# if self.parentBlock.type is VARIABLE:
# self.showtext = self.parentBlock.name + " "+ self.showtext
img = generateBlock(self.cvImg, 34, self.showtext, self.parentBlock.typeBlock, None, self.parentBlock.type,
self.parentBlock.nameControl)
qImage = toQImage(img)
# Multiplying by 0 is an easy way to get an ndarray of zeros
# with the same shape as the original
try:
self.header = copy.copy(self.cvImg[0:39, 0:149])
self.foot = copy.copy(self.cvImg[69:104, 0:149])
except:
pass
self.img = QtGui.QPixmap(qImage)
self.scene = scene
self.setFlags(QtWidgets.QGraphicsItem.ItemIsMovable)
self.setZValue(1)
self.setPos(self.parentBlock.pos)
self.scene.activeShouldSave()
self.updatePixmap()
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update)
self.posmouseinItem = None
self.DialogVar = None
self.popMenu = None
self.create_dialogs()
self.sizeIn = 0
self.shouldUpdateConnections = False
def addNotification(self, notification):
tooltip = self.toolTip()
if tooltip:
tooltip += '<hr />'
tooltip += notification.simpleHtml()
self.setToolTip(tooltip)
self._notifications.append(notification)
def clearNotifications(self):
self._notifications = []
self.setToolTip('')
def notifications(self):
return self._notifications
def highlight(self):
self.highlighted = True
self.updateImg(force=True)
self.updatePixmap()
def unhighlight(self):
self.highlighted = False
self.updateImg(force=True)
self.updatePixmap()
def updatePixmap(self):
self.setPixmap(self.img)
def create_dialogs(self):
if self.DialogVar is not None:
del self.DialogVar
vars = self.parentBlock.vars
self.DialogVar = VarGui()
self.DialogVar.init()
self.DialogVar.setSlotToDeleteButton(self.delete)
self.tabVar = self.DialogVar.getTable()
self.tabVar.verticalHeader().setVisible(False)
self.tabVar.horizontalHeader().setVisible(True)
self.tabVar.setColumnCount(4)
self.tabVar.setRowCount(len(vars))
self.tableHeader = [] #QtCore.QStringList()
self.tableHeader.append(self.tr('Name'))
self.tableHeader.append(self.tr('Constant'))
self.tableHeader.append(self.tr('Set to'))
self.tableHeader.append(self.tr('Type'))
self.tabVar.setHorizontalHeaderLabels(self.tableHeader)
self.tabVar.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
# i = 0
for i, var in zip(range(len(vars)),vars):
try:
if getLanguage() in var.translate:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.translate[getLanguage()]))
else:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.name))
except:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.name))
if var.type in ["float","int", "string"]:
edit = QtWidgets.QLineEdit()
if var.type == "float":
edit.setValidator(QtGui.QDoubleValidator())
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('number')))
elif var.type == "int":
edit.setValidator(QtGui.QIntValidator())
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('number')))
else:
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('string')))
if var.type == "string":
edit.setText(var.defaul.replace('\"', ''))
else:
edit.setText(var.defaul)
self.tabVar.setCellWidget(i, 1, edit)
elif var.type == "boolean":
combobox = QtWidgets.QComboBox()
combobox.addItem("True")
combobox.addItem("False")
if var.defaul in ("0", "False"):
combobox.setCurrentIndex(1)
else:
combobox.setCurrentIndex(0)
self.tabVar.setCellWidget(i, 1, combobox)
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('boolean')))
elif var.type == "list":
values = var.translateValues[getLanguage()]
combobox = QtWidgets.QComboBox()
combobox.addItems(values)
self.tabVar.setCellWidget(i, 1, combobox)
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('list')))
elif var.type == "apriltext":
dictApriText = getAprilTextDict()
combobox = QtWidgets.QComboBox()
combobox.addItems([x for x in dictApriText])
self.tabVar.setCellWidget(i, 1, combobox)
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('apriltext')))
combobox = QtWidgets.QComboBox()
combobox.addItem(self.tr('Constant'))
self.tabVar.setCellWidget(i, 2, combobox)
# self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(var.type))
# i += 1
if self.popMenu is not None:
del self.popMenu
del self.keyPressEater
self.popMenu = QtWidgets.QMenu(self)
self.keyPressEater = KeyPressEater(self.popMenu)
self.popMenu.installEventFilter(self.keyPressEater)
action1 = QtWidgets.QAction(self.tr('Edit'), self)
action1.triggered.connect(self.on_clicked_menu_edit)
self.popMenu.addAction(action1)
if self.parentBlock.name not in ["main", "when"]:
if self.parentBlock.type is USERFUNCTION and self.parentBlock.typeBlock is COMPLEXBLOCK:
action3 = QtWidgets.QAction(self.tr('Export Block'), self)
action3.triggered.connect(self.on_clicked_menu_export_block)
self.popMenu.addAction(action3)
else:
action0 = QtWidgets.QAction(self.tr('Duplicate'), self)
action0.triggered.connect(self.on_clicked_menu_duplicate)
self.popMenu.addAction(action0)
self.popMenu.addSeparator()
action2 = QtWidgets.QAction(self.tr('Delete'), self)
action2.triggered.connect(self.on_clicked_menu_delete)
# action2.installEventFilter(self.keyPressEater)
self.popMenu.addAction(action2)
def on_clicked_menu_export_block(self):
if self.parentBlock.name not in ["main", "when"] and self.parentBlock.type is USERFUNCTION and self.parentBlock.typeBlock is COMPLEXBLOCK:
self.scene.stopAllblocks()
path = QtWidgets.QFileDialog.getExistingDirectory(self, self.tr('Select Library'), self.scene.parent.libraryPath, QtWidgets.QFileDialog.ShowDirsOnly)
self.scene.startAllblocks()
ret = None
try:
os.mkdir(os.path.join(path, self.parentBlock.name))
except:
msgBox = QtWidgets.QMessageBox()
msgBox.setWindowTitle(self.tr("Warning"))
msgBox.setIcon(QtWidgets.QMessageBox.Warning)
msgBox.setText(self.tr("This module already exists"))
msgBox.setInformativeText(self.tr("Do you want to overwrite the changes?"))
msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok| QtWidgets.QMessageBox.Cancel)
msgBox.setDefaultButton(QtWidgets.QMessageBox.Ok)
ret = msgBox.exec_()
if ret is None or ret == QtWidgets.QMessageBox.Ok:
path = os.path.join(path, self.parentBlock.name)
# Save blockProject
lBInstance = self.scene.parent
with open(os.path.join(path, self.parentBlock.name + ".blockProject"), 'wb') as fichero:
dic = copy.deepcopy(lBInstance.scene.dicBlockItem)
for id in dic:
block = dic[id]
block.file = os.path.basename(block.file)
pickle.dump(
(dic, lBInstance.listNameWhens, lBInstance.listUserFunctions, lBInstance.listNameVars, lBInstance.listNameUserFunctions),
fichero, 0)
# Save config block
dictBlock = {}
dictBlock["name"] = self.parentBlock.name
dictBlock["type"] = "library"
dictBlock["shape"] = ["blockVertical"]
with open(os.path.join(path, self.parentBlock.name + ".conf"),'w') as f:
json.dump([dictBlock], f)
# Save script learnCode
inst = self.getInstructions()
code = "def " + toLBotPy(inst) + "\nend\n\n"
with open(os.path.join(path, self.parentBlock.name + ".lb"), 'w') as f:
f.write(code)
# Save script python
codePython = parserLearntBotCodeOnlyUserFuntion(code)
with open(os.path.join(path, self.parentBlock.name + ".py"), 'w') as f:
f.write(codePython)
pass
def on_clicked_menu_duplicate(self):
if self.parentBlock.name not in ["main", "when"] and not (self.parentBlock.type is USERFUNCTION and self.parentBlock.typeBlock is COMPLEXBLOCK):
self.duplicate()
self.scene.startAllblocks()
self.scene.parent.savetmpProject()
def duplicate(self, old_id=None, id=None, connection=None):
blockDuplicate = self.parentBlock.copy()
blockDuplicate.setPos(self.parentBlock.pos + QtCore.QPointF(50, 50))
self.scene.addItem(blockDuplicate, False, False, False)
id_new = blockDuplicate.id
new_connection = None
for c in blockDuplicate.connections:
if id is None and c.getType() in [TOP, LEFT]:
c.setItem(None)
c.setConnect(None)
elif old_id is not None and c.getIdItem() == old_id:
new_connection = c
c.setItem(id)
c.setConnect(connection)
elif c.getIdItem() is not None and c.getType() not in [TOP, LEFT]:
c_new, id_new2 = self.scene.getVisualItem(c.getIdItem()).duplicate(self.id, id_new, c)
c.setConnect(c_new)
c.setItem(id_new2)
return new_connection, id_new
def on_clicked_menu_edit(self):
self.scene.setIdItemSelected(None)
if self.DialogVar is not None and len(self.parentBlock.getVars())>0:
self.setCurrentParamInDialog()
self.DialogVar.open()
self.scene.setTable(self.DialogVar)
def setCurrentParamInDialog(self):
varS = self.parentBlock.getVars()
if len(varS)>0:
combo = self.tabVar.cellWidget(0, 2)
assignList = [combo.itemText(i) for i in range(combo.count())]
for cell, var in zip(range(len(varS)), varS):
if varS[cell].defaul in assignList:
index = assignList.index(varS[cell].defaul)
self.tabVar.cellWidget(cell, 2).setCurrentIndex(index)
if var.type in ["float","int", "string"]:
self.tabVar.cellWidget(cell, 1).setText("")
else:
self.tabVar.cellWidget(cell, 1).setCurrentIndex(0)
def on_clicked_menu_delete(self):
self.delete()
def start(self):
self.timer.start(5)
def stop(self):
self.timer.stop()
def activeUpdateConections(self):
self.shouldUpdateConnections = True
def getNameFuntion(self):
return self.parentBlock.name
def getIdItemBottomConnect(self):
for c in [conn for conn in self.connections if conn.getType() is BOTTOM]:
return self.scene.getVisualItem(c.getIdItem())
def getIdItemTopConnect(self):
for c in [conn for conn in self.connections if conn.getType() is TOP]:
return self.scene.getVisualItem(c.getIdItem())
def getNumSubBottom(self, n=0, size=0):
size += self.img.height() - 5
for c in [conn for conn in self.connections if conn.getType() is BOTTOM]:
if c.getConnect() is None:
return n + 1, size + 1
else:
return self.scene.getVisualItem(c.getIdItem()).getNumSubBottom(n + 1, size)
return n + 1, size + 1
def getNumSub(self, n=0):
for c in [conn for conn in self.connections if conn.getType() is BOTTOMIN and conn.getConnect() is not None]:
return self.scene.getVisualItem(c.getIdItem()).getNumSubBottom()
return 0, 0
def getInstructionsRIGHT(self, inst=[]):
for c in [conn for conn in self.connections if conn.getType() is RIGHT and conn.getIdItem() is not None]:
inst = self.scene.getVisualItem(c.getIdItem()).getInstructions()
if len(inst) is 0:
return None
return inst
def getInstructionsBOTTOM(self, inst=[]):
for c in [conn for conn in self.connections if conn.getType() is BOTTOM and conn.getIdItem() is not None]:
inst = self.scene.getVisualItem(c.getIdItem()).getInstructions()
if len(inst) is 0:
return None
return inst
def getInstructionsBOTTOMIN(self, inst=[]):
for c in [conn for conn in self.connections if conn.getType() is BOTTOMIN and conn.getIdItem() is not None]:
inst = self.scene.getVisualItem(c.getIdItem()).getInstructions()
if len(inst) is 0:
return None
return inst
def getVars(self):
vars = []
varS = self.parentBlock.getVars()
# for cell in range(0, self.tabVar.rowCount()):
for cell, var in zip(range(len(varS)), varS):
if self.tabVar.cellWidget(cell, 2).currentText() == self.tr('Constant'):
if self.tabVar.cellWidget(cell, 3).text() == "boolean":
vars.append(self.tabVar.cellWidget(cell, 1).currentText())
elif self.tabVar.cellWidget(cell, 3).text() == "list":
vars.append('"' + var.values[self.tabVar.cellWidget(cell, 1).currentIndex()] + '"')
elif self.tabVar.cellWidget(cell, 3).text() == "apriltext":
vars.append('"' +self.tabVar.cellWidget(cell, 1).currentText() + '"')
elif self.tabVar.cellWidget(cell, 3).text() == "string":
vars.append('"'+self.tabVar.cellWidget(cell, 1).text()+'"')
else:
vars.append(self.tabVar.cellWidget(cell, 1).text())
else:
vars.append(self.tabVar.cellWidget(cell, 2).currentText())
if len(vars) is 0:
vars = None
return vars
def getInstructions(self, inst=[]):
instRight = self.getInstructionsRIGHT()
instBottom = self.getInstructionsBOTTOM()
instBottomIn = self.getInstructionsBOTTOMIN()
nameControl = self.parentBlock.nameControl
if nameControl is "":
nameControl = None
dic = {}
dic["NAMECONTROL"] = nameControl
dic["RIGHT"] = instRight
dic["BOTTOM"] = instBottom
dic["BOTTOMIN"] = instBottomIn
dic["VARIABLES"] = self.getVars()
dic["TYPE"] = self.__type
dic["VISUALBLOCK"] = self
return self.getNameFuntion(), dic
def getId(self):
return self.parentBlock.id
def updateImg(self, force=False):
if self.__typeBlock is COMPLEXBLOCK:
nSubBlock, size = self.getNumSub()
else:
size = 34
if size is 0:
size = 34
if self.sizeIn != size or self.shouldUpdate or force:
self.sizeIn = size
im = generateBlock(self.cvImg, size, self.showtext, self.__typeBlock, None, self.getVars(), self.__type,
self.parentBlock.nameControl)
if self.highlighted:
im = generate_error_block(im)
if not self.isEnabled():
r, g, b, a = cv2.split(im)
im = cv2.cvtColor(im, cv2.COLOR_RGBA2GRAY)
im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
r, g, b= cv2.split(im)
im = cv2.merge((r, g, b, a))
qImage = toQImage(im)
self.img = QtGui.QPixmap(qImage)
self.updatePixmap()
if self.sizeIn != size or self.shouldUpdate:
for c in self.connections:
if c.getType() is BOTTOM:
c.setPoint(QtCore.QPointF(c.getPoint().x(), im.shape[0] - 5))
if c.getIdItem() is not None:
self.scene.getVisualItem(c.getIdItem()).moveToPos(
self.pos() + QtCore.QPointF(0, self.img.height() - 5))
if c.getType() is RIGHT:
c.setPoint(QtCore.QPointF(im.shape[1] - 5, c.getPoint().y()))
if c.getIdItem() is not None:
self.scene.getVisualItem(c.getIdItem()).moveToPos(
self.pos() + QtCore.QPointF(self.img.width() - 5, 0))
self.shouldUpdate = False
def updateVarValues(self):
vars = self.getVars()
prev_vars = self.parentBlock.getVars()
if vars is not None:
for i in range(0, len(vars)):
if vars[i] != prev_vars[i].defaul:
self.shouldUpdate = True
self.parentBlock.updateVars(vars)
break
def updateConnections(self):
for c in [conn for conn in self.connections if conn.getConnect() is not None and EuclideanDist(conn.getPosPoint(), conn.getConnect().getPosPoint()) > 7]:
c.getConnect().setItem(None)
c.getConnect().setConnect(None)
c.setItem(None)
c.setConnect(None)
def update(self):
if len(self.dicTrans) is not 0 and self.showtext is not self.dicTrans[getLanguage()]:
#Language change
self.create_dialogs()
self.shouldUpdate = True
self.showtext = self.dicTrans[getLanguage()]
vars = self.parentBlock.vars
for i, var in zip(range(len(vars)), vars):
try:
if getLanguage() in var.translate:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.translate[getLanguage()]))
else:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.name))
if var.type == "list":
values = var.translateValues[getLanguage()]
val = self.tabVar.cellWidget(i, 1).currentIndex()
combobox = QtWidgets.QComboBox()
combobox.addItems(values)
self.tabVar.setCellWidget(i, 1, combobox)
combobox.setCurrentIndex(val)
except:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.name))
for row in range(0, self.tabVar.rowCount()):
combobox = self.tabVar.cellWidget(row, 2)
items = []
for i in reversed(range(1, combobox.count())):
items.append(combobox.itemText(i))
if combobox.itemText(i) not in self.scene.parent.listNameVars:
combobox.removeItem(i)
combobox.setCurrentIndex(0)
for var in self.scene.parent.listNameVars:
if var not in items:
combobox.addItem(var)
self.updateVarValues()
self.updateImg()
if self.shouldUpdateConnections:
self.updateConnections()
def moveToPos(self, pos, connect=False):
if self.highlighted:
self.unhighlight()
self.clearNotifications()
if connect is False and self.posmouseinItem is not None:
pos = pos - self.posmouseinItem
self.setPos(pos)
self.parentBlock.setPos(copy.deepcopy(self.pos()))
self.scene.activeShouldSave()
for c in self.connections:
if c.getType() in (TOP, LEFT) and self is self.scene.getItemSelected() and connect is not True:
if c.getIdItem() is not None:
c.getConnect().setItem(None)
c.getConnect().setConnect(None)
c.setItem(None)
c.setConnect(None)
elif c.getType() is BOTTOM:
if c.getIdItem() is not None:
self.scene.getVisualItem(c.getIdItem()).moveToPos(
self.pos() + QtCore.QPointF(0, self.img.height() - 5), connect)
elif c.getType() is BOTTOMIN:
if c.getIdItem() is not None:
self.scene.getVisualItem(c.getIdItem()).moveToPos(self.pos() + QtCore.QPointF(17, 33), connect)
elif c.getType() is RIGHT:
if c.getIdItem() is not None:
self.scene.getVisualItem(c.getIdItem()).moveToPos(
self.pos() + QtCore.QPointF(self.img.width() - 5, 0), connect)
def getLastItem(self):
for c in [conn for conn in self.connections if conn.getType() is BOTTOM]:
if c.getConnect() is None:
return c
else:
return self.scene.getVisualItem(c.getIdItem()).getLastItem()
return None
def getLastRightItem(self):
for c in [conn for conn in self.connections if conn.getType() is RIGHT]:
if c.getConnect() is None:
return c
else:
return self.scene.getVisualItem(c.getIdItem()).getLastRightItem()
return None
def moveToFront(self):
self.setZValue(1)
for c in [conn for conn in self.connections if conn.getType() in [BOTTOM, BOTTOMIN] and conn.getConnect() is not None]:
self.scene.getVisualItem(c.getIdItem()).moveToFront()
def mouseMoveEvent(self, event):
if self.isEnabled():
self.setPos(event.scenePos() - self.posmouseinItem)
self.parentBlock.setPos(self.pos())
self.scene.activeShouldSave()
def mousePressEvent(self, event):
if self.isEnabled():
if event.button() is QtCore.Qt.MouseButton.LeftButton:
self.posmouseinItem = event.scenePos() - self.pos()
self.scene.setIdItemSelected(self.id)
if self.DialogVar is not None:
self.DialogVar.close()
if event.button() is QtCore.Qt.MouseButton.RightButton:
self.popMenu.exec_(event.screenPos())
def mouseDoubleClickEvent(self, event):
if self.isEnabled():
if event.button() is QtCore.Qt.MouseButton.LeftButton:
self.scene.setIdItemSelected(None)
if self.DialogVar is not None:
self.DialogVar.open()
self.scene.setTable(self.DialogVar)
if event.button() is QtCore.Qt.MouseButton.RightButton:
pass
def mouseReleaseEvent(self, event):
if self.isEnabled():
if event.button() is QtCore.Qt.MouseButton.LeftButton:
self.posmouseinItem = None
self.scene.setIdItemSelected(None)
if event.button() is QtCore.Qt.MouseButton.RightButton:
self.posmouseinItem = None
self.scene.setIdItemSelected(None)
pass
def delete(self, savetmp=True):
self.DialogVar.close()
del self.cvImg
del self.img
del self.foot
del self.header
del self.timer
del self.DialogVar
for c in [conn for conn in self.connections if conn.getIdItem() is not None]:
if c.getType() in [BOTTOM, BOTTOMIN, RIGHT]:
self.scene.getVisualItem(c.getIdItem()).delete(savetmp=False)
else:
c.getConnect().setConnect(None)
c.getConnect().setItem(None)
if self.parentBlock.name == "when":
self.scene.parent.delWhen(self.parentBlock.nameControl)
if self.parentBlock.name == "main" and self.scene.parent.mainButton is not None:
self.scene.parent.mainButton.setEnabled(True)
self.scene.removeItem(self.id, savetmp)
del self.parentBlock
del self
def isBlockDef(self):
if self.parentBlock.name == "when":
return True
if len([conn for conn in self.connections if conn.getType() in [TOP, BOTTOM, RIGHT, LEFT]])>0:
return False
return True
def setEnabledDependentBlocks(self,enable):
self.shouldUpdate = True
self.setEnabled(enable)
for c in [conn for conn in self.connections if conn.getIdItem() is not None and conn.getType() not in [TOP, LEFT]]:
self.scene.getVisualItem(c.getIdItem()).setEnabledDependentBlocks(enable)
| robocomp/learnbot | learnbot_dsl/learnbotCode/VisualBlock.py | Python | gpl-3.0 | 29,715 |
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import versioneer
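# Test command that defers importing pytest until the tests are actually run.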
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args + ' test')
sys.exit(errno)
cmd_classes = versioneer.get_cmdclass()
cmd_classes['test'] = PyTest
setup(
name="kinderstadt-registry",
version=versioneer.get_version(),
cmdclass=cmd_classes,
packages=find_packages(),
install_requires=[
'alembic==0.7.6',
'click==4.0',
'fake-factory==0.5.2',
'Flask-Migrate==1.4.0',
'Flask-SQLAlchemy==2.0',
'Flask-WTF==0.11',
'Flask==0.10.1',
'path.py==7.3',
'pgcli==0.17.0',
'python-stdnum==1.1',
'SQLAlchemy-Searchable==0.9.3',
'SQLAlchemy-Utils==0.30.12',
],
extras_require={
'devel': [
'ansible',
'autopep8',
'flake8',
'ipython',
],
},
tests_require=[
'pytest',
'testing.postgresql'
],
entry_points={
'console_scripts': [
'registry=registry.cli:main'
]
}
)
| arsgeografica/kinderstadt-registry | setup.py | Python | gpl-3.0 | 1,548 |
"""
Test Result
-----------
Provides a TextTestResult that extends unittest._TextTestResult to
provide support for error classes (such as the builtin skip and
deprecated classes), and hooks for plugins to take over or extend
reporting.
"""
import logging
from unittest import _TextTestResult
from nose.config import Config
from nose.util import isclass, ln as _ln # backwards compat
log = logging.getLogger('nose.result')
def _exception_detail(exc):
# this is what stdlib module traceback does
try:
return str(exc)
except:
return '<unprintable %s object>' % type(exc).__name__
class TextTestResult(_TextTestResult):
"""Text test result that extends unittest's default test result
support for a configurable set of errorClasses (eg, Skip,
Deprecated, TODO) that extend the errors/failures/success triad.
"""
def __init__(self, stream, descriptions, verbosity, config=None,
errorClasses=None):
if errorClasses is None:
errorClasses = {}
self.errorClasses = errorClasses
if config is None:
config = Config()
self.config = config
_TextTestResult.__init__(self, stream, descriptions, verbosity)
def addError(self, test, err):
"""Overrides normal addError to add support for
errorClasses. If the exception is a registered class, the
error will be added to the list for that class, not errors.
"""
stream = getattr(self, 'stream', None)
ec, ev, tb = err
try:
exc_info = self._exc_info_to_string(err, test)
except TypeError:
# 2.3 compat
exc_info = self._exc_info_to_string(err)
for cls, (storage, label, isfail) in self.errorClasses.items():
if isclass(ec) and issubclass(ec, cls):
if isfail:
test.passed = False
storage.append((test, exc_info))
# Might get patched into a streamless result
if stream is not None:
if self.showAll:
message = [label]
detail = _exception_detail(err[1])
if detail:
message.append(detail)
stream.writeln(": ".join(message))
elif self.dots:
stream.write(label[:1])
return
self.errors.append((test, exc_info))
test.passed = False
if stream is not None:
if self.showAll:
self.stream.writeln('ERROR')
elif self.dots:
stream.write('E')
def printErrors(self):
"""Overrides to print all errorClasses errors as well.
"""
_TextTestResult.printErrors(self)
for cls in self.errorClasses.keys():
storage, label, isfail = self.errorClasses[cls]
if isfail:
self.printErrorList(label, storage)
# Might get patched into a result with no config
if hasattr(self, 'config'):
self.config.plugins.report(self.stream)
def printSummary(self, start, stop):
"""Called by the test runner to print the final summary of test
run results.
"""
write = self.stream.write
writeln = self.stream.writeln
taken = float(stop - start)
run = self.testsRun
plural = run != 1 and "s" or ""
writeln(self.separator2)
writeln("Ran %s test%s in %.3fs" % (run, plural, taken))
writeln()
summary = {}
eckeys = self.errorClasses.keys()
eckeys.sort()
for cls in eckeys:
storage, label, isfail = self.errorClasses[cls]
count = len(storage)
if not count:
continue
summary[label] = count
if len(self.failures):
summary['failures'] = len(self.failures)
if len(self.errors):
summary['errors'] = len(self.errors)
if not self.wasSuccessful():
write("FAILED")
else:
write("OK")
items = summary.items()
if items:
items.sort()
write(" (")
write(", ".join(["%s=%s" % (label, count) for
label, count in items]))
writeln(")")
else:
writeln()
def wasSuccessful(self):
"""Overrides to check that there are no errors in errorClasses
lists that are marked as errors and should cause a run to
fail.
"""
if self.errors or self.failures:
return False
for cls in self.errorClasses.keys():
storage, label, isfail = self.errorClasses[cls]
if not isfail:
continue
if storage:
return False
return True
def _addError(self, test, err):
try:
exc_info = self._exc_info_to_string(err, test)
except TypeError:
# 2.3: does not take test arg
exc_info = self._exc_info_to_string(err)
self.errors.append((test, exc_info))
if self.showAll:
self.stream.write('ERROR')
elif self.dots:
self.stream.write('E')
def _exc_info_to_string(self, err, test=None):
# 2.3/2.4 -- 2.4 passes test, 2.3 does not
try:
return _TextTestResult._exc_info_to_string(self, err, test)
except TypeError:
# 2.3: does not take test arg
return _TextTestResult._exc_info_to_string(self, err)
def ln(*arg, **kw):
from warnings import warn
warn("ln() has moved to nose.util from nose.result and will be removed "
"from nose.result in a future release. Please update your imports ",
DeprecationWarning)
return _ln(*arg, **kw)
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/nose-0.11.1-py2.7.egg/nose/result.py | Python | gpl-3.0 | 5,943 |
# -*- test-case-name: twisted.conch.test.test_userauth -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of the ssh-userauth service.
Currently implemented authentication types are public-key and password.
Maintainer: Paul Swartz
"""
import struct, warnings
from twisted.conch import error, interfaces
from twisted.conch.ssh import keys, transport, service
from twisted.conch.ssh.common import NS, getNS
from twisted.cred import credentials
from twisted.cred.error import UnauthorizedLogin
from twisted.internet import defer, reactor
from twisted.python import failure, log, util
class SSHUserAuthServer(service.SSHService):
"""
A service implementing the server side of the 'ssh-userauth' service. It
is used to authenticate the user on the other side as being able to access
this server.
@ivar name: the name of this service: 'ssh-userauth'
@type name: C{str}
@ivar authenticatedWith: a list of authentication methods that have
already been used.
@type authenticatedWith: C{list}
@ivar loginTimeout: the number of seconds we wait before disconnecting
the user for taking too long to authenticate
@type loginTimeout: C{int}
@ivar attemptsBeforeDisconnect: the number of failed login attempts we
allow before disconnecting.
@type attemptsBeforeDisconnect: C{int}
@ivar loginAttempts: the number of login attempts that have been made
@type loginAttempts: C{int}
@ivar passwordDelay: the number of seconds to delay when the user gives
an incorrect password
@type passwordDelay: C{int}
@ivar interfaceToMethod: a C{dict} mapping credential interfaces to
authentication methods. The server checks to see which of the
cred interfaces have checkers and tells the client that those methods
are valid for authentication.
@type interfaceToMethod: C{dict}
@ivar supportedAuthentications: A list of the supported authentication
methods.
@type supportedAuthentications: C{list} of C{str}
@ivar user: the last username the client tried to authenticate with
@type user: C{str}
@ivar method: the current authentication method
@type method: C{str}
@ivar nextService: the service the user wants started after authentication
has been completed.
@type nextService: C{str}
@ivar portal: the L{twisted.cred.portal.Portal} we are using for
authentication
@type portal: L{twisted.cred.portal.Portal}
@ivar clock: an object with a callLater method. Stubbed out for testing.
"""
name = 'ssh-userauth'
loginTimeout = 10 * 60 * 60
# 10 minutes before we disconnect them
attemptsBeforeDisconnect = 20
# 20 login attempts before a disconnect
passwordDelay = 1 # number of seconds to delay on a failed password
clock = reactor
interfaceToMethod = {
credentials.ISSHPrivateKey : 'publickey',
credentials.IUsernamePassword : 'password',
credentials.IPluggableAuthenticationModules : 'keyboard-interactive',
}
def serviceStarted(self):
"""
Called when the userauth service is started. Set up instance
variables, check if we should allow password/keyboard-interactive
authentication (only allow if the outgoing connection is encrypted) and
set up a login timeout.
"""
self.authenticatedWith = []
self.loginAttempts = 0
self.user = None
self.nextService = None
self._pamDeferred = None
self.portal = self.transport.factory.portal
self.supportedAuthentications = []
for i in self.portal.listCredentialsInterfaces():
if i in self.interfaceToMethod:
self.supportedAuthentications.append(self.interfaceToMethod[i])
if not self.transport.isEncrypted('in'):
# don't let us transport password in plaintext
if 'password' in self.supportedAuthentications:
self.supportedAuthentications.remove('password')
if 'keyboard-interactive' in self.supportedAuthentications:
self.supportedAuthentications.remove('keyboard-interactive')
self._cancelLoginTimeout = self.clock.callLater(
self.loginTimeout,
self.timeoutAuthentication)
def serviceStopped(self):
"""
Called when the userauth service is stopped. Cancel the login timeout
if it's still going.
"""
if self._cancelLoginTimeout:
self._cancelLoginTimeout.cancel()
self._cancelLoginTimeout = None
def timeoutAuthentication(self):
"""
Called when the user has timed out on authentication. Disconnect
with a DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE message.
"""
self._cancelLoginTimeout = None
self.transport.sendDisconnect(
transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
'you took too long')
def tryAuth(self, kind, user, data):
"""
Try to authenticate the user with the given method. Dispatches to a
auth_* method.
@param kind: the authentication method to try.
@type kind: C{str}
@param user: the username the client is authenticating with.
@type user: C{str}
@param data: authentication specific data sent by the client.
@type data: C{str}
@return: A Deferred called back if the method succeeded, or erred back
if it failed.
@rtype: C{defer.Deferred}
"""
log.msg('%s trying auth %s' % (user, kind))
if kind not in self.supportedAuthentications:
return defer.fail(
error.ConchError('unsupported authentication, failing'))
kind = kind.replace('-', '_')
f = getattr(self,'auth_%s'%kind, None)
if f:
ret = f(data)
if not ret:
return defer.fail(
error.ConchError('%s return None instead of a Deferred'
% kind))
else:
return ret
return defer.fail(error.ConchError('bad auth type: %s' % kind))
def ssh_USERAUTH_REQUEST(self, packet):
"""
The client has requested authentication. Payload::
string user
string next service
string method
<authentication specific data>
@type packet: C{str}
"""
user, nextService, method, rest = getNS(packet, 3)
if user != self.user or nextService != self.nextService:
self.authenticatedWith = [] # clear auth state
self.user = user
self.nextService = nextService
self.method = method
d = self.tryAuth(method, user, rest)
if not d:
self._ebBadAuth(
failure.Failure(error.ConchError('auth returned none')))
return
d.addCallback(self._cbFinishedAuth)
d.addErrback(self._ebMaybeBadAuth)
d.addErrback(self._ebBadAuth)
return d
def _cbFinishedAuth(self, (interface, avatar, logout)):
"""
The callback when user has successfully been authenticated. For a
description of the arguments, see L{twisted.cred.portal.Portal.login}.
We start the service requested by the user.
"""
self.transport.avatar = avatar
self.transport.logoutFunction = logout
service = self.transport.factory.getService(self.transport,
self.nextService)
if not service:
raise error.ConchError('could not get next service: %s'
% self.nextService)
log.msg('%s authenticated with %s' % (self.user, self.method))
self.transport.sendPacket(MSG_USERAUTH_SUCCESS, '')
self.transport.setService(service())
def _ebMaybeBadAuth(self, reason):
"""
An intermediate errback. If the reason is
error.NotEnoughAuthentication, we send a MSG_USERAUTH_FAILURE, but
with the partial success indicator set.
@type reason: L{twisted.python.failure.Failure}
"""
reason.trap(error.NotEnoughAuthentication)
self.transport.sendPacket(MSG_USERAUTH_FAILURE,
NS(','.join(self.supportedAuthentications)) + '\xff')
def _ebBadAuth(self, reason):
"""
The final errback in the authentication chain. If the reason is
error.IgnoreAuthentication, we simply return; the authentication
method has sent its own response. Otherwise, send a failure message
and (if the method is not 'none') increment the number of login
attempts.
@type reason: L{twisted.python.failure.Failure}
"""
if reason.check(error.IgnoreAuthentication):
return
if self.method != 'none':
log.msg('%s failed auth %s' % (self.user, self.method))
if reason.check(UnauthorizedLogin):
log.msg('unauthorized login: %s' % reason.getErrorMessage())
elif reason.check(error.ConchError):
log.msg('reason: %s' % reason.getErrorMessage())
else:
log.msg(reason.getTraceback())
self.loginAttempts += 1
if self.loginAttempts > self.attemptsBeforeDisconnect:
self.transport.sendDisconnect(
transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
'too many bad auths')
return
self.transport.sendPacket(
MSG_USERAUTH_FAILURE,
NS(','.join(self.supportedAuthentications)) + '\x00')
def auth_publickey(self, packet):
"""
Public key authentication. Payload::
byte has signature
string algorithm name
string key blob
[string signature] (if has signature is True)
Create a SSHPublicKey credential and verify it using our portal.
"""
hasSig = ord(packet[0])
algName, blob, rest = getNS(packet[1:], 2)
pubKey = keys.Key.fromString(blob)
signature = hasSig and getNS(rest)[0] or None
if hasSig:
b = (NS(self.transport.sessionID) + chr(MSG_USERAUTH_REQUEST) +
NS(self.user) + NS(self.nextService) + NS('publickey') +
chr(hasSig) + NS(pubKey.sshType()) + NS(blob))
c = credentials.SSHPrivateKey(self.user, algName, blob, b,
signature)
return self.portal.login(c, None, interfaces.IConchUser)
else:
c = credentials.SSHPrivateKey(self.user, algName, blob, None, None)
return self.portal.login(c, None,
interfaces.IConchUser).addErrback(self._ebCheckKey,
packet[1:])
def _ebCheckKey(self, reason, packet):
"""
Called back if the user did not send a signature. If reason is
error.ValidPublicKey then this key is valid for the user to
authenticate with. Send MSG_USERAUTH_PK_OK.
"""
reason.trap(error.ValidPublicKey)
# if we make it here, it means that the publickey is valid
self.transport.sendPacket(MSG_USERAUTH_PK_OK, packet)
return failure.Failure(error.IgnoreAuthentication())
def auth_password(self, packet):
"""
Password authentication. Payload::
string password
Make a UsernamePassword credential and verify it with our portal.
"""
password = getNS(packet[1:])[0]
c = credentials.UsernamePassword(self.user, password)
return self.portal.login(c, None, interfaces.IConchUser).addErrback(
self._ebPassword)
def _ebPassword(self, f):
"""
If the password is invalid, wait before sending the failure in order
to delay brute-force password guessing.
"""
d = defer.Deferred()
self.clock.callLater(self.passwordDelay, d.callback, f)
return d
def auth_keyboard_interactive(self, packet):
"""
Keyboard interactive authentication. No payload. We create a
PluggableAuthenticationModules credential and authenticate with our
portal.
"""
if self._pamDeferred is not None:
self.transport.sendDisconnect(
transport.DISCONNECT_PROTOCOL_ERROR,
"only one keyboard interactive attempt at a time")
return defer.fail(error.IgnoreAuthentication())
c = credentials.PluggableAuthenticationModules(self.user,
self._pamConv)
return self.portal.login(c, None, interfaces.IConchUser)
def _pamConv(self, items):
"""
Convert a list of PAM authentication questions into a
MSG_USERAUTH_INFO_REQUEST. Returns a Deferred that will be called
back when the user has responses to the questions.
@param items: a list of 2-tuples (message, kind). We only care about
kinds 1 (password) and 2 (text).
@type items: C{list}
@rtype: L{defer.Deferred}
"""
resp = []
for message, kind in items:
if kind == 1: # password
resp.append((message, 0))
elif kind == 2: # text
resp.append((message, 1))
elif kind in (3, 4):
return defer.fail(error.ConchError(
'cannot handle PAM 3 or 4 messages'))
else:
return defer.fail(error.ConchError(
'bad PAM auth kind %i' % kind))
packet = NS('') + NS('') + NS('')
packet += struct.pack('>L', len(resp))
for prompt, echo in resp:
packet += NS(prompt)
packet += chr(echo)
self.transport.sendPacket(MSG_USERAUTH_INFO_REQUEST, packet)
self._pamDeferred = defer.Deferred()
return self._pamDeferred
def ssh_USERAUTH_INFO_RESPONSE(self, packet):
"""
The user has responded with answers to PAMs authentication questions.
Parse the packet into a PAM response and callback self._pamDeferred.
Payload::
uint32 number of responses
string response 1
...
string response n
"""
d, self._pamDeferred = self._pamDeferred, None
try:
resp = []
numResps = struct.unpack('>L', packet[:4])[0]
packet = packet[4:]
while len(resp) < numResps:
response, packet = getNS(packet)
resp.append((response, 0))
if packet:
raise error.ConchError("%i bytes of extra data" % len(packet))
except:
d.errback(failure.Failure())
else:
d.callback(resp)
class SSHUserAuthClient(service.SSHService):
"""
A service implementing the client side of 'ssh-userauth'.
@ivar name: the name of this service: 'ssh-userauth'
@type name: C{str}
@ivar preferredOrder: a list of authentication methods we support, in
order of preference. The client will try authentication methods in
this order, making callbacks for information when necessary.
@type preferredOrder: C{list}
@ivar user: the name of the user to authenticate as
@type user: C{str}
@ivar instance: the service to start after authentication has finished
@type instance: L{service.SSHService}
@ivar authenticatedWith: a list of strings of authentication methods we've tried
@type authenticatedWith: C{list} of C{str}
@ivar triedPublicKeys: a list of public key objects that we've tried to
authenticate with
@type triedPublicKeys: C{list} of L{Key}
@ivar lastPublicKey: the last public key object we've tried to authenticate
with
@type lastPublicKey: L{Key}
"""
name = 'ssh-userauth'
preferredOrder = ['publickey', 'password', 'keyboard-interactive']
def __init__(self, user, instance):
self.user = user
self.instance = instance
def serviceStarted(self):
self.authenticatedWith = []
self.triedPublicKeys = []
self.lastPublicKey = None
self.askForAuth('none', '')
def askForAuth(self, kind, extraData):
"""
Send a MSG_USERAUTH_REQUEST.
@param kind: the authentication method to try.
@type kind: C{str}
@param extraData: method-specific data to go in the packet
@type extraData: C{str}
"""
self.lastAuth = kind
self.transport.sendPacket(MSG_USERAUTH_REQUEST, NS(self.user) +
NS(self.instance.name) + NS(kind) + extraData)
def tryAuth(self, kind):
"""
Dispatch to an authentication method.
@param kind: the authentication method
@type kind: C{str}
"""
kind = kind.replace('-', '_')
log.msg('trying to auth with %s' % (kind,))
f = getattr(self,'auth_%s' % (kind,), None)
if f:
return f()
def _ebAuth(self, ignored, *args):
"""
Generic callback for a failed authentication attempt. Respond by
asking for the list of accepted methods (the 'none' method)
"""
self.askForAuth('none', '')
def ssh_USERAUTH_SUCCESS(self, packet):
"""
We received a MSG_USERAUTH_SUCCESS. The server has accepted our
authentication, so start the next service.
"""
self.transport.setService(self.instance)
def ssh_USERAUTH_FAILURE(self, packet):
"""
We received a MSG_USERAUTH_FAILURE. Payload::
string methods
byte partial success
If partial success is True, then the previous method succeeded but is
        not sufficient for authentication. methods is a comma-separated list of
accepted authentication methods.
We sort the list of methods by their position in self.preferredOrder,
removing methods that have already succeeded. We then call
        self.tryAuth with the most preferred method.
"""
canContinue, partial = getNS(packet)
partial = ord(partial)
if partial:
self.authenticatedWith.append(self.lastAuth)
def orderByPreference(meth):
if meth in self.preferredOrder:
return self.preferredOrder.index(meth)
else:
return -1
canContinue = util.dsu([meth for meth in canContinue.split(',')
if meth not in self.authenticatedWith],
orderByPreference)
log.msg('can continue with: %s' % canContinue)
return self._cbUserauthFailure(None, iter(canContinue))
def _cbUserauthFailure(self, result, iterator):
if result:
return
try:
method = iterator.next()
except StopIteration:
self.transport.sendDisconnect(
transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
'no more authentication methods available')
else:
d = defer.maybeDeferred(self.tryAuth, method)
d.addCallback(self._cbUserauthFailure, iterator)
return d
def ssh_USERAUTH_PK_OK(self, packet):
"""
        This message (number 60) can mean several different things depending
on the current authentication type. We dispatch to individual methods
in order to handle this request.
"""
func = getattr(self, 'ssh_USERAUTH_PK_OK_%s' %
self.lastAuth.replace('-', '_'), None)
if func is not None:
return func(packet)
else:
self.askForAuth('none', '')
def ssh_USERAUTH_PK_OK_publickey(self, packet):
"""
        This is MSG_USERAUTH_PK_OK. Our public key is valid, so we create a
signature and try to authenticate with it.
"""
publicKey = self.lastPublicKey
b = (NS(self.transport.sessionID) + chr(MSG_USERAUTH_REQUEST) +
NS(self.user) + NS(self.instance.name) + NS('publickey') +
'\xff' + NS(publicKey.sshType()) + NS(publicKey.blob()))
d = self.signData(publicKey, b)
if not d:
self.askForAuth('none', '')
# this will fail, we'll move on
return
d.addCallback(self._cbSignedData)
d.addErrback(self._ebAuth)
def ssh_USERAUTH_PK_OK_password(self, packet):
"""
This is MSG_USERAUTH_PASSWD_CHANGEREQ. The password given has expired.
We ask for an old password and a new password, then send both back to
the server.
"""
prompt, language, rest = getNS(packet, 2)
self._oldPass = self._newPass = None
d = self.getPassword('Old Password: ')
d = d.addCallbacks(self._setOldPass, self._ebAuth)
d.addCallback(lambda ignored: self.getPassword(prompt))
d.addCallbacks(self._setNewPass, self._ebAuth)
def ssh_USERAUTH_PK_OK_keyboard_interactive(self, packet):
"""
        This is MSG_USERAUTH_INFO_REQUEST. The server has sent us the
        questions it wants us to answer, so we ask the user and send the
responses.
"""
name, instruction, lang, data = getNS(packet, 3)
numPrompts = struct.unpack('!L', data[:4])[0]
data = data[4:]
prompts = []
for i in range(numPrompts):
prompt, data = getNS(data)
echo = bool(ord(data[0]))
data = data[1:]
prompts.append((prompt, echo))
d = self.getGenericAnswers(name, instruction, prompts)
d.addCallback(self._cbGenericAnswers)
d.addErrback(self._ebAuth)
def _cbSignedData(self, signedData):
"""
Called back out of self.signData with the signed data. Send the
authentication request with the signature.
@param signedData: the data signed by the user's private key.
@type signedData: C{str}
"""
publicKey = self.lastPublicKey
self.askForAuth('publickey', '\xff' + NS(publicKey.sshType()) +
NS(publicKey.blob()) + NS(signedData))
def _setOldPass(self, op):
"""
Called back when we are choosing a new password. Simply store the old
password for now.
@param op: the old password as entered by the user
@type op: C{str}
"""
self._oldPass = op
def _setNewPass(self, np):
"""
Called back when we are choosing a new password. Get the old password
and send the authentication message with both.
@param np: the new password as entered by the user
@type np: C{str}
"""
op = self._oldPass
self._oldPass = None
self.askForAuth('password', '\xff' + NS(op) + NS(np))
def _cbGenericAnswers(self, responses):
"""
Called back when we are finished answering keyboard-interactive
questions. Send the info back to the server in a
MSG_USERAUTH_INFO_RESPONSE.
@param responses: a list of C{str} responses
@type responses: C{list}
"""
data = struct.pack('!L', len(responses))
for r in responses:
data += NS(r.encode('UTF8'))
self.transport.sendPacket(MSG_USERAUTH_INFO_RESPONSE, data)
def auth_publickey(self):
"""
Try to authenticate with a public key. Ask the user for a public key;
if the user has one, send the request to the server and return True.
Otherwise, return False.
@rtype: C{bool}
"""
d = defer.maybeDeferred(self.getPublicKey)
d.addBoth(self._cbGetPublicKey)
return d
def _cbGetPublicKey(self, publicKey):
if isinstance(publicKey, str):
warnings.warn("Returning a string from "
"SSHUserAuthClient.getPublicKey() is deprecated "
"since Twisted 9.0. Return a keys.Key() instead.",
DeprecationWarning)
publicKey = keys.Key.fromString(publicKey)
if not isinstance(publicKey, keys.Key): # failure or None
publicKey = None
if publicKey is not None:
self.lastPublicKey = publicKey
self.triedPublicKeys.append(publicKey)
log.msg('using key of type %s' % publicKey.type())
self.askForAuth('publickey', '\x00' + NS(publicKey.sshType()) +
NS(publicKey.blob()))
return True
else:
return False
def auth_password(self):
"""
Try to authenticate with a password. Ask the user for a password.
        If the user provides a password, return True. Otherwise, return
False.
@rtype: C{bool}
"""
d = self.getPassword()
if d:
d.addCallbacks(self._cbPassword, self._ebAuth)
return True
else: # returned None, don't do password auth
return False
def auth_keyboard_interactive(self):
"""
Try to authenticate with keyboard-interactive authentication. Send
the request to the server and return True.
@rtype: C{bool}
"""
log.msg('authing with keyboard-interactive')
self.askForAuth('keyboard-interactive', NS('') + NS(''))
return True
def _cbPassword(self, password):
"""
Called back when the user gives a password. Send the request to the
server.
@param password: the password the user entered
@type password: C{str}
"""
self.askForAuth('password', '\x00' + NS(password))
def signData(self, publicKey, signData):
"""
Sign the given data with the given public key.
By default, this will call getPrivateKey to get the private key,
then sign the data using Key.sign().
This method is factored out so that it can be overridden to use
alternate methods, such as a key agent.
@param publicKey: The public key object returned from L{getPublicKey}
@type publicKey: L{keys.Key}
@param signData: the data to be signed by the private key.
@type signData: C{str}
@return: a Deferred that's called back with the signature
@rtype: L{defer.Deferred}
"""
key = self.getPrivateKey()
if not key:
return
return key.addCallback(self._cbSignData, signData)
def _cbSignData(self, privateKey, signData):
"""
Called back when the private key is returned. Sign the data and
return the signature.
@param privateKey: the private key object
        @type privateKey: L{keys.Key}
@param signData: the data to be signed by the private key.
@type signData: C{str}
@return: the signature
@rtype: C{str}
"""
if not isinstance(privateKey, keys.Key):
warnings.warn("Returning a PyCrypto key object from "
"SSHUserAuthClient.getPrivateKey() is deprecated "
"since Twisted 9.0. Return a keys.Key() instead.",
DeprecationWarning)
privateKey = keys.Key(privateKey)
return privateKey.sign(signData)
def getPublicKey(self):
"""
Return a public key for the user. If no more public keys are
available, return C{None}.
This implementation always returns C{None}. Override it in a
subclass to actually find and return a public key object.
@rtype: L{Key} or L{NoneType}
"""
return None
def getPrivateKey(self):
"""
Return a L{Deferred} that will be called back with the private key
object corresponding to the last public key from getPublicKey().
If the private key is not available, errback on the Deferred.
@rtype: L{Deferred} called back with L{Key}
"""
return defer.fail(NotImplementedError())
def getPassword(self, prompt = None):
"""
Return a L{Deferred} that will be called back with a password.
prompt is a string to display for the password, or None for a generic
'user@hostname's password: '.
@type prompt: C{str}/C{None}
@rtype: L{defer.Deferred}
"""
return defer.fail(NotImplementedError())
def getGenericAnswers(self, name, instruction, prompts):
"""
        Returns a L{Deferred} with the responses to the prompts.
@param name: The name of the authentication currently in progress.
@param instruction: Describes what the authentication wants.
@param prompts: A list of (prompt, echo) pairs, where prompt is a
string to display and echo is a boolean indicating whether the
user's response should be echoed as they type it.
"""
return defer.fail(NotImplementedError())
MSG_USERAUTH_REQUEST = 50
MSG_USERAUTH_FAILURE = 51
MSG_USERAUTH_SUCCESS = 52
MSG_USERAUTH_BANNER = 53
MSG_USERAUTH_PASSWD_CHANGEREQ = 60
MSG_USERAUTH_INFO_REQUEST = 60
MSG_USERAUTH_INFO_RESPONSE = 61
MSG_USERAUTH_PK_OK = 60
messages = {}
for k, v in locals().items():
if k[:4]=='MSG_':
messages[v] = k # doesn't handle doubles
SSHUserAuthServer.protocolMessages = messages
SSHUserAuthClient.protocolMessages = messages
del messages
del v
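# --- Illustrative sketch (not part of Twisted): a minimal SSHUserAuthClient
# subclass that answers password prompts from a stored credential. The user
# name, password and wrapped connection service are hypothetical placeholders;
# a real client would obtain them interactively or from configuration.
class _ExamplePasswordAuthClient(SSHUserAuthClient):
    def __init__(self, user, instance, password):
        SSHUserAuthClient.__init__(self, user, instance)
        self._password = password
    def getPassword(self, prompt=None):
        # Returning an already-fired Deferred lets auth_password() send the
        # 'password' MSG_USERAUTH_REQUEST immediately.
        return defer.succeed(self._password)
    def getPublicKey(self):
        # No key available, so the 'publickey' method is skipped and the
        # client falls through to the next preferred method.
        return None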
|
Donkyhotay/MoonPy
|
twisted/conch/ssh/userauth.py
|
Python
|
gpl-3.0
| 29,730
|
# This file is part of taxtastic.
#
# taxtastic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# taxtastic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with taxtastic. If not, see <http://www.gnu.org/licenses/>.
"""Restore a change to a refpkg immediately after being reverted
Restore the last ``N`` rolled back operations on ``refpkg``, or the
last operation if ``-n`` is omitted. If there are not at least ``N``
operations that can be rolled forward on this refpkg, then an error is
returned and no changes are made to the refpkg.
Note that operations can only be rolled forward immediately after
being rolled back. If any operation besides a rollback occurs, all
roll forward information is removed.
"""
import logging
from taxtastic import refpkg
log = logging.getLogger(__name__)
def build_parser(parser):
parser.add_argument('refpkg', action='store', metavar='refpkg',
help='the reference package to operate on')
parser.add_argument('-n', action='store', metavar='int',
default=1, type=int,
help='Number of operations to roll back')
def action(args):
"""Roll forward previously rolled back commands on a refpkg.
*args* should be an argparse object with fields refpkg (giving the
    path to the refpkg to operate on) and optionally n (giving the
    number of operations to roll forward).
"""
log.info('loading reference package')
r = refpkg.Refpkg(args.refpkg, create=False)
# First check if we can do n rollforwards
q = r.contents
for i in range(args.n):
if q['rollforward'] is None:
log.error(
'Cannot rollforward {} changes; '
'refpkg only records {} rolled back changes.'.format(args.n, i))
return 1
else:
q = q['rollforward'][1]
for i in range(args.n):
r.rollforward()
return 0
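# Illustrative sketch (not part of taxtastic): wiring build_parser() and
# action() together by hand. The refpkg path 'example.refpkg' is hypothetical.
if __name__ == '__main__':
    import argparse
    _parser = argparse.ArgumentParser(description=__doc__)
    build_parser(_parser)
    _args = _parser.parse_args(['example.refpkg', '-n', '2'])
    raise SystemExit(action(_args))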
|
fhcrc/taxtastic
|
taxtastic/subcommands/rollforward.py
|
Python
|
gpl-3.0
| 2,393
|
# -*- coding: utf-8 -*-
from asyncore import write
import serial
import sys
import time
import strutils
from datafile import DataFile
__author__ = 'Trol'
# Installation:
# python -m pip install pyserial
def _bytes(i):
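    # divmod(i, 0x100) splits a 16-bit value into its (high byte, low byte)
    # pair, which is how addresses and sizes are sent over the serial link.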
return divmod(i, 0x100)
class Bootloader:
CMD_SYNC = 0
CMD_ABOUT = 1
CMD_READ_FLASH = 2
CMD_READ_EEPROM = 3
CMD_READ_FUSES = 4
CMD_START_APP = 5
CMD_ERASE_PAGE = 6
CMD_WRITE_FLASH_PAGE = 7
CMD_TRANSFER_PAGE = 8
def __init__(self, port_name, bauds):
self.serial = serial.serial_for_url(port_name, baudrate=bauds, timeout=1.0)
res1 = self.sync(1)
if res1 != 1 and res1 > 0:
cnt = 1
while True:
try:
self._read()
cnt += 1
except:
break
print 'skip', cnt, 'bytes'
self.sync(100)
def close(self):
self.serial.close()
def sync(self, val):
self._cmd(Bootloader.CMD_SYNC, val)
try:
return self._read()
except:
return -1
def get_about(self):
self._cmd(Bootloader.CMD_ABOUT)
info = {
'signature': self._read_char() + self._read_char() + self._read_char() + self._read_char(),
'version': self._read_word(),
'bootloader_start': self._read_dword(),
'bootloader_size': self._read_word(),
'page_size': self._read_word(),
'device_signature': (self._read() << 16) + (self._read() << 8) + self._read()
}
return info
def read_flash(self, addr_in_16_byte_pages, size16):
self._cmd(Bootloader.CMD_READ_FLASH, _bytes(addr_in_16_byte_pages), _bytes(size16))
result = []
for i in range(0, size16):
v = self._read()
result.append(v)
return result
def read_eeprom(self, addr16, size16):
self._cmd(Bootloader.CMD_READ_EEPROM, _bytes(addr16), _bytes(size16))
result = []
for i in range(0, size16):
v = self._read()
result.append(v)
return result
def read_fuses(self):
pass
def start_app(self):
self._cmd(Bootloader.CMD_START_APP)
def erase_page(self, page_number):
self._cmd(Bootloader.CMD_ERASE_PAGE, _bytes(page_number))
def write_flash_page(self, page_number):
self._cmd(Bootloader.CMD_WRITE_FLASH_PAGE, _bytes(page_number))
self._read() # TODO == 0 ???
def transfer_page(self, page_data):
self._cmd(Bootloader.CMD_TRANSFER_PAGE, page_data)
def _read_all(self):
while True:
try:
self.serial.read()
return
except:
return
def _write(self, b):
self.serial.write(chr(b))
def _cmd(self, *args):
for a in args:
if type(a) is tuple:
for v in a:
self._write(v)
elif type(a) is list:
for v in a:
self._write(v)
else:
self._write(a)
def _read(self):
b = ord(self.serial.read())
return b
def _read_char(self):
return self.serial.read()
def _read_word(self):
return (self._read() << 8) + self._read()
def _read_dword(self):
return (self._read_word() << 16) + self._read_word()
class Loader:
def __init__(self, port, baudrate):
self.dev = Bootloader(port, baudrate)
if self.dev.sync(123) != 123:
print "Can't connect to bootloader"
sys.exit(-1)
about = self.dev.get_about()
if about['signature'] != 'TSBL':
print "Wrong bootloader signature"
sys.exit(-1)
self.page_size = about['page_size']
self.bootloader_size = about['bootloader_size']
self.bootloader_start = about['bootloader_start']
self.firmware_pages = self.bootloader_start / self.page_size
        print 'page size', self.page_size
print 'pages', self.firmware_pages
def read_all_flash(self, with_loader):
start = time.time()
size = self.bootloader_start
if with_loader:
size += self.bootloader_size
#return self.read_flash(0, size)
ret = self.read_flash(0, size)
tm = time.time() - start
print 'read flash time', tm, 'speed=', 1e6*tm/size, 'us/byte'
return ret
def read_flash(self, offset, size):
result = []
while size > 0:
read_size = size if size < 0xffff else 0xffff
dt = self.dev.read_flash(offset >> 4, read_size)
result.extend(dt)
offset += read_size
size -= read_size
return result
def _find_changed_pages(self, data):
if len(data) > self.bootloader_start:
data = data[0:self.bootloader_start]
pages_in_data = len(data) / self.page_size
if pages_in_data*self.page_size < len(data):
pages_in_data += 1
print 'pages_in_data', pages_in_data
read = self.read_flash(0, pages_in_data*self.page_size)
changed_pages = pages_in_data*[False]
changes_count = 0
# TODO detect if page is empty !!!
for page in range(0, pages_in_data):
data_page_is_empty = True
for o in range(0, self.page_size):
if data[page * self.page_size + o] != 0xff:
data_page_is_empty = False
break
if data_page_is_empty:
continue
for o in range(0, self.page_size):
if data[page * self.page_size + o] != read[page * self.page_size + o]:
changed_pages[page] = True
                    print '! offset', o, 'page', page, '->', hex(page * self.page_size + o), ' data =', hex(data[page * self.page_size + o]), 'vs read =', hex(read[page * self.page_size + o])
changes_count += 1
break
if changes_count == 0:
print 'No changes'
else:
print 'changed pages', changes_count, 'from', len(changed_pages)
# print changed_pages
return changed_pages
def write_flash(self, data):
while len(data) < self.firmware_pages * self.page_size:
data.append(0xff)
changed_pages = self._find_changed_pages(data)
start = time.time()
write_counter = 0
for page in range(0, len(changed_pages)):
if not changed_pages[page]:
continue
self.dev.transfer_page(data[page*self.page_size:page*self.page_size + self.page_size])
#print 'erase', page
self.dev.erase_page(page)
#print 'write', page
self.dev.write_flash_page(page)
write_counter += 1
tm = time.time() - start
if write_counter > 0:
print 'write flash time', tm, 1e6*tm/write_counter/self.page_size, 'us/byte'
def read_and_save_flash(self, filename, with_bootloader):
_df = DataFile([])
_df.data = self.read_all_flash(with_bootloader)
print 'Read', len(_df.data), 'bytes'
_df.save(filename)
def print_dump(lst):
s = ''
i = 0
for v in lst:
vs = hex(v)[2:]
i += 1
if len(vs) == 1:
vs = '0' + vs
s += vs + ' '
if (i % 16) == 0:
print s
s = ''
#fw = DataFile('/Users/trol/Projects/radio/avr-lcd-module-128x128/build/avr-lcd-module-128x128.hex')
fw = DataFile('/Users/trol/Projects/radio/avr-ic-tester-v2/firmware/tester/build/ic-tester-main.hex')
# read 230400 44.0383300884 us/byte
#write 234000 256.21552423 us/byte
# 255.434597666 us/byte
#l = Loader('/dev/tty.wchusbserial14230', 57600)
#l = Loader('/dev/tty.wchusbserial14230', 230400)
l = Loader('/dev/tty.wchusbserial14220', 153600)
print l.dev.get_about()
l.read_and_save_flash('flash_with_loader.hex', True)
l.read_and_save_flash('flash_without_loader.hex', False)
l.write_flash(fw.data)
l.dev.start_app()
1/0
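# Everything below this line is unreachable because of the 1/0 above; it
# appears to be older experimental code kept for reference.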
# df = DataFile([])
#
# df.load('flash.hex')
# df.save('flash2.hex')
dev = Bootloader('/dev/tty.wchusbserial14230', 57600)
print dev.sync(10)
print dev.get_about()
eeprom = dev.read_eeprom(0, 1024)
# flash = dev.read_flash(0x7000, 0x1000)
flash = dev.read_flash(0x0000, 32*1024)
# print_dump(flash)
print_dump(flash)
df = DataFile(flash)
df.save('flash.hex')
dev.start_app()
|
trol73/avr-bootloader
|
software/python/loader.py
|
Python
|
gpl-3.0
| 8,577
|
# coding=utf-8
"""PySnapSync client.
This package implements the pysnapsync client.
The package exports the following modules:
o `snapsync` main backup script.
See the module doc strings for more information.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from pysnapsync.client import snapsync
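# Build __all__ from the imported submodules' dotted names, keeping only the
# final component (e.g. "pysnapsync.client.snapsync" -> "snapsync").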
__all__ = [x.__name__.split(".")[-1] for x in [snapsync]]
|
dtaylor84/pysnapsync
|
pysnapsync/client/__init__.py
|
Python
|
gpl-3.0
| 427
|
#!/usr/bin/env python
"""zip source directory tree"""
import argparse
import fnmatch
import logging
import os
import re
import subprocess
import zipfile
def get_version():
command = ['git', 'describe', '--tags', '--dirty', '--always']
return subprocess.check_output(command).decode('utf-8')
def source_walk(root):
root = os.path.abspath(root)
regex = re.compile(fnmatch.translate('*.py[co]'))
for path, _, files in os.walk(root):
files[:] = [f for f in files if regex.match(f) is None]
for filename in files:
fullpath = os.path.join(path, filename)
yield fullpath, os.path.relpath(fullpath, root)
def setup():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-d', '--debug',
action='store_true',
help='print debug information')
argparser.add_argument(
'-o',
metavar='zipfile',
dest='output',
help='output file name')
argparser.add_argument(
'source',
help='source directory')
args = argparser.parse_args()
loglevel = logging.DEBUG if args.debug else logging.WARNING
logging.basicConfig(format='%(levelname)s: %(message)s', level=loglevel)
if not os.path.isdir(args.source):
logging.critical('"%s" is not a directory', args.source)
return
if args.output is None:
args.output = args.source + '.zip'
with zipfile.ZipFile(args.output, 'w', zipfile.ZIP_DEFLATED) as fzip:
fzip.writestr('version.txt', get_version())
for path, relpath in source_walk(args.source):
fzip.write(path, relpath)
if __name__ == '__main__':
setup()
|
nsubiron/configure-pyz
|
setup.py
|
Python
|
gpl-3.0
| 1,666
|
"""
Author: RedFantom
Contributors: Daethyra (Naiii) and Sprigellania (Zarainia)
License: GNU GPLv3 as in LICENSE
Copyright (C) 2016-2018 RedFantom
"""
from ast import literal_eval
def config_eval(value):
"""
    Safely evaluate a string that can be in a configuration file to a
valid Python value. Performs error handling and checks special
cases.
"""
try:
literal = literal_eval(value)
except (ValueError, SyntaxError):
return value
if literal == 1:
return True
elif literal == 0:
return False
else:
return literal
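# Illustrative usage sketch (not part of the parser); the sample strings below
# are hypothetical configuration values.
if __name__ == '__main__':
    print(config_eval("1"))              # -> True (special-cased)
    print(config_eval("0"))              # -> False (special-cased)
    print(config_eval("[800, 600]"))     # -> [800, 600], a real Python literal
    print(config_eval("not_a_literal"))  # -> 'not_a_literal', returned as-is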
|
RedFantom/GSF-Parser
|
settings/eval.py
|
Python
|
gpl-3.0
| 597
|
#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class MenuButton(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self)
self.connect("destroy", Gtk.main_quit)
menubutton = Gtk.MenuButton("MenuButton")
self.add(menubutton)
menu = Gtk.Menu()
menubutton.set_popup(menu)
for count in range(1, 6):
menuitem = Gtk.MenuItem("Item %i" % (count))
menuitem.connect("activate", self.on_menuitem_activated)
menu.append(menuitem)
menu.show_all()
def on_menuitem_activated(self, menuitem):
print("%s Activated" % (menuitem.get_label()))
window = MenuButton()
window.show_all()
Gtk.main()
|
hezral/Rogu
|
reference/menubutton.py
|
Python
|
gpl-3.0
| 750
|
f = open("data/planetsc.txt", "r")
earth = 0
for line in f:
planet = line.strip().lower()
if planet[0] == "#":
continue
earth += 1
if planet == "earth":
break
print "Earth is planet #%d" % earth
|
otfried/cs101
|
code/files/planets4.py
|
Python
|
gpl-3.0
| 213
|
import xmlrpclib as xml
import time
# connect to environment via XML-RPC
e = xml.ServerProxy('http://localhost:8001')
def sense():
return e.do('sense', {'agent':'Ralph'})
def step(time):
e.do('meta_step', {'seconds':time})
def do(command, args = None):
if not args:
args = {}
args['agent'] = 'Ralph'
return e.do(command, args)
layer_config = {'0': {'name': 'Reactive'}, '1':{'name':'Deliberative','color':[0.9,0.8,0.3,1]}, '2': {'name':'Reflective'}}
#layer_config = {'0': {'name': 'ID'}, '1':{'name':'Ego'}}#,'color':[0.9,0.8,0.3,1]}, '2': {'name':'Reflective'}}
print "Connected to IsisWorld"
scenarios = e.do('meta_list_scenarios')
print e.do('meta_load_scenario', {'scenario': scenarios[0]})
tasks = e.do('meta_list_tasks')
print e.do('meta_load_task', {'task': tasks[0]})
print e.do("meta_setup_thought_layers", layer_config)
print 'Going into training mode'
print e.do('meta_train')
while True:
for key, layer in layer_config.items():
msg = "Thought in the %s layer." % layer['name']
do("think",{'message':msg, 'layer': key})
time.sleep(1)
|
dasmith/IsisWorld
|
agents/test_thoughts.py
|
Python
|
gpl-3.0
| 1,119
|
# ----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Huynh Vi Lam <domovilam@gmail.com>
#
# This file is part of pimucha.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
import logging,sys
logger = logging.getLogger()
X10Checkers = {
'RF' : 'x10chk(args)' ,
'PL' : 'x10chk(args)' ,
}
X10Encoders = {
'RF' : ('', 'x10rf2hex(args)', '') ,
'PL' : ('', 'x10pl2hex(args)', '') ,
}
|
domovilam/pimucha
|
piHAparsers/x10libs/c_cm1x.py
|
Python
|
gpl-3.0
| 1,136
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2018 Jonathan Peirce
# Distributed under the terms of the GNU General Public License (GPL).
"""A Backend class defines the core low-level functions required by a Window
class, such as the ability to create an OpenGL context and flip the window.
Users simply call visual.Window(..., winType='pyglet') and the winType is then
used by backends.getBackend(winType) which will locate the appropriate class
and initialize an instance using the attributes of the Window.
"""
from __future__ import absolute_import, print_function
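# Illustrative usage sketch (assumes a working PsychoPy installation; kept as
# comments so that importing this backend module stays side-effect free):
#     from psychopy import visual
#     win = visual.Window(size=(800, 600), winType='pyglet')
#     win.flip()
#     win.close()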
import sys
import os
import numpy as np
import psychopy
from psychopy import logging, event, platform_specific, constants
from psychopy.tools.attributetools import attributeSetter
from .gamma import setGamma, setGammaRamp, getGammaRamp, getGammaRampSize
from .. import globalVars
from ._base import BaseBackend
import pyglet
# Ensure setting pyglet.options['debug_gl'] to False is done prior to any
# other calls to pyglet or pyglet submodules, otherwise it may not get picked
# up by the pyglet GL engine and have no effect.
# Shaders will work but require OpenGL2.0 drivers AND PyOpenGL3.0+
pyglet.options['debug_gl'] = False
GL = pyglet.gl
retinaContext = None # it will be set to an actual context if needed
class PygletBackend(BaseBackend):
"""The pyglet backend is the most used backend. It has no dependencies
or C libs that need compiling, but may not be as fast or efficient as libs
like GLFW.
"""
GL = pyglet.gl
def __init__(self, win, *args, **kwargs):
"""Set up the backend window according the params of the PsychoPy win
Before PsychoPy 1.90.0 this code was executed in Window._setupPygame()
:param: win is a PsychoPy Window (usually not fully created yet)
"""
BaseBackend.__init__(self, win) # sets up self.win=win as weakref
if win.allowStencil:
stencil_size = 8
else:
stencil_size = 0
vsync = 0
# provide warning if stereo buffers are requested but unavailable
if win.stereo and not GL.gl_info.have_extension('GL_STEREO'):
logging.warning(
'A stereo window was requested but the graphics '
'card does not appear to support GL_STEREO')
win.stereo = False
if sys.platform=='darwin' and not win.useRetina and pyglet.version >= "1.3":
raise ValueError("As of PsychoPy 1.85.3 OSX windows should all be "
"set to useRetina=True (or remove the argument). "
"Pyglet 1.3 appears to be forcing "
"us to use retina on any retina-capable screen "
"so setting to False has no effect.")
# multisampling
sample_buffers = 0
aa_samples = 0
if win.multiSample:
sample_buffers = 1
# get maximum number of samples the driver supports
max_samples = (GL.GLint)()
GL.glGetIntegerv(GL.GL_MAX_SAMPLES, max_samples)
if (win.numSamples >= 2) and (
win.numSamples <= max_samples.value):
# NB - also check if divisible by two and integer?
aa_samples = win.numSamples
else:
logging.warning(
'Invalid number of MSAA samples provided, must be '
'integer greater than two. Disabling.')
win.multiSample = False
# options that the user might want
config = GL.Config(depth_size=8, double_buffer=True,
sample_buffers=sample_buffers,
samples=aa_samples, stencil_size=stencil_size,
stereo=win.stereo,
vsync=vsync)
defDisp = pyglet.window.get_platform().get_default_display()
allScrs = defDisp.get_screens()
# Screen (from Exp Settings) is 1-indexed,
# so the second screen is Screen 1
if len(allScrs) < int(win.screen) + 1:
logging.warn("Requested an unavailable screen number - "
"using first available.")
thisScreen = allScrs[0]
else:
thisScreen = allScrs[win.screen]
if win.autoLog:
logging.info('configured pyglet screen %i' % self.screen)
# if fullscreen check screen size
if win._isFullScr:
win._checkMatchingSizes(win.size, [thisScreen.width,
thisScreen.height])
w = h = None
else:
w, h = win.size
if win.allowGUI:
style = None
else:
style = 'borderless'
try:
self.winHandle = pyglet.window.Window(
width=w, height=h,
caption="PsychoPy",
fullscreen=win._isFullScr,
config=config,
screen=thisScreen,
style=style)
except pyglet.gl.ContextException:
            # turn off the shadow window and try again
pyglet.options['shadow_window'] = False
self.winHandle = pyglet.window.Window(
width=w, height=h,
caption="PsychoPy",
fullscreen=self._isFullScr,
config=config,
screen=thisScreen,
style=style)
logging.warning(
"Pyglet shadow_window has been turned off. This is "
"only an issue for you if you need multiple "
"stimulus windows, in which case update your "
"graphics card and/or graphics drivers.")
if sys.platform == 'win32':
# pyHook window hwnd maps to:
# pyglet 1.14 -> window._hwnd
# pyglet 1.2a -> window._view_hwnd
if pyglet.version > "1.2":
win._hw_handle = self.winHandle._view_hwnd
else:
win._hw_handle = self.winHandle._hwnd
elif sys.platform == 'darwin':
if win.useRetina:
global retinaContext
retinaContext = self.winHandle.context._nscontext
view = retinaContext.view()
bounds = view.convertRectToBacking_(view.bounds()).size
if win.size[0] == bounds.width:
win.useRetina = False # the screen is not a retina display
win.size = np.array([int(bounds.width), int(bounds.height)])
try:
# python 32bit (1.4. or 1.2 pyglet)
win._hw_handle = self.winHandle._window.value
except Exception:
# pyglet 1.2 with 64bit python?
win._hw_handle = self.winHandle._nswindow.windowNumber()
elif sys.platform.startswith('linux'):
win._hw_handle = self.winHandle._window
if win.useFBO: # check for necessary extensions
if not GL.gl_info.have_extension('GL_EXT_framebuffer_object'):
msg = ("Trying to use a framebuffer object but "
"GL_EXT_framebuffer_object is not supported. Disabled")
logging.warn(msg)
win.useFBO = False
if not GL.gl_info.have_extension('GL_ARB_texture_float'):
msg = ("Trying to use a framebuffer object but "
"GL_ARB_texture_float is not supported. Disabling")
logging.warn(msg)
win.useFBO = False
if pyglet.version < "1.2" and sys.platform == 'darwin':
platform_specific.syncSwapBuffers(1)
# add these methods to the pyglet window
self.winHandle.setGamma = setGamma
self.winHandle.setGammaRamp = setGammaRamp
self.winHandle.getGammaRamp = getGammaRamp
self.winHandle.set_vsync(True)
self.winHandle.on_text = event._onPygletText
self.winHandle.on_key_press = event._onPygletKey
self.winHandle.on_mouse_press = event._onPygletMousePress
self.winHandle.on_mouse_release = event._onPygletMouseRelease
self.winHandle.on_mouse_scroll = event._onPygletMouseWheel
if not win.allowGUI:
# make mouse invisible. Could go further and make it 'exclusive'
# (but need to alter x,y handling then)
self.winHandle.set_mouse_visible(False)
self.winHandle.on_resize = _onResize # avoid circular reference
if not win.pos:
# work out where the centre should be
if win.useRetina:
win.pos = [(thisScreen.width - win.size[0]/2) / 2,
(thisScreen.height - win.size[1]/2) / 2]
else:
win.pos = [(thisScreen.width - win.size[0]) / 2,
(thisScreen.height - win.size[1]) / 2]
if not win._isFullScr:
# add the necessary amount for second screen
self.winHandle.set_location(int(win.pos[0] + thisScreen.x),
int(win.pos[1] + thisScreen.y))
try: # to load an icon for the window
iconFile = os.path.join(psychopy.prefs.paths['resources'],
'psychopy.ico')
icon = pyglet.image.load(filename=iconFile)
self.winHandle.set_icon(icon)
except Exception:
pass # doesn't matter
# store properties of the system
self._driver = pyglet.gl.gl_info.get_renderer()
self._origGammaRamp = self.getGammaRamp()
self._rampSize = getGammaRampSize(self.screenID, self.xDisplay)
self._TravisTesting = (os.environ.get('TRAVIS') == 'true')
@property
def shadersSupported(self):
# on pyglet shaders are fine so just check GL>2.0
return pyglet.gl.gl_info.get_version() >= '2.0'
def swapBuffers(self, flipThisFrame=True):
"""Performs various hardware events around the window flip and then
performs the actual flip itself (assuming that flipThisFrame is true)
:param flipThisFrame: setting this to False treats this as a frame but
doesn't actually trigger the flip itself (e.g. because the device
needs multiple rendered frames per flip)
"""
# make sure this is current context
if globalVars.currWindow != self:
self.winHandle.switch_to()
globalVars.currWindow = self
GL.glTranslatef(0.0, 0.0, -5.0)
for dispatcher in self.win._eventDispatchers:
try:
dispatcher.dispatch_events()
except:
dispatcher._dispatch_events()
# this might need to be done even more often than once per frame?
self.winHandle.dispatch_events()
# for pyglet 1.1.4 you needed to call media.dispatch for
# movie updating
if pyglet.version < '1.2':
pyglet.media.dispatch_events() # for sounds to be processed
if flipThisFrame:
self.winHandle.flip()
def setMouseVisibility(self, visibility):
self.winHandle.set_mouse_visible(visibility)
def setCurrent(self):
"""Sets this window to be the current rendering target
:return: None
"""
if self != globalVars.currWindow:
self.winHandle.switch_to()
globalVars.currWindow = self
win = self.win # it's a weakref so faster to call just once
# if we are using an FBO, bind it
if hasattr(win, 'frameBuffer'):
GL.glBindFramebufferEXT(GL.GL_FRAMEBUFFER_EXT,
win.frameBuffer)
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0_EXT)
GL.glDrawBuffer(GL.GL_COLOR_ATTACHMENT0_EXT)
# NB - check if we need these
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glEnable(GL.GL_STENCIL_TEST)
def dispatchEvents(self):
"""Dispatch events to the event handler (typically called on each frame)
:return:
"""
wins = pyglet.window.get_platform().get_default_display().get_windows()
for win in wins:
win.dispatch_events()
def onResize(self, width, height):
_onResize(width, height)
@attributeSetter
def gamma(self, gamma):
self.__dict__['gamma'] = gamma
if gamma is not None:
setGamma(
screenID=self.screenID,
newGamma=gamma,
rampSize=self._rampSize,
driver=self._driver,
xDisplay=self.xDisplay
)
@attributeSetter
def gammaRamp(self, gammaRamp):
"""Gets the gamma ramp or sets it to a new value (an Nx3 or Nx1 array)
"""
self.__dict__['gammaRamp'] = gammaRamp
setGammaRamp(self.screenID, gammaRamp, nAttempts=3,
xDisplay=self.xDisplay)
def getGammaRamp(self):
return getGammaRamp(self.screenID, self.xDisplay)
@property
def screenID(self):
"""Returns the screen ID or device context (depending on the platform)
for the current Window
"""
if sys.platform == 'win32':
scrBytes = self.winHandle._dc
if constants.PY3:
try:
_screenID = 0xFFFFFFFF & int.from_bytes(scrBytes, byteorder='little')
except TypeError:
_screenID = 0xFFFFFFFF & scrBytes
else:
try:
_screenID = 0xFFFFFFFF & scrBytes
except TypeError:
_screenID = scrBytes
elif sys.platform == 'darwin':
try:
_screenID = self.winHandle._screen.id # pyglet1.2alpha1
except AttributeError:
_screenID = self.winHandle._screen._cg_display_id # pyglet1.2
elif sys.platform.startswith('linux'):
_screenID = self.winHandle._x_screen_id
return _screenID
@property
def xDisplay(self):
"""On X11 systems this returns the XDisplay being used and None on all
other platforms"""
if sys.platform.startswith('linux'):
return self.winHandle._x_display
def close(self):
"""Close the window and uninitialize the resources
"""
# Check if window has device context and is thus not closed
if self.winHandle.context is None:
return
# restore the gamma ramp that was active when window was opened
if not self._TravisTesting:
self.gammaRamp = self._origGammaRamp
_hw_handle = None
try:
_hw_handle = self.win._hw_handle
self.winHandle.close()
except Exception:
pass
# If iohub is running, inform it to stop looking for this win id
# when filtering kb and mouse events (if the filter is enabled of
# course)
try:
if IOHUB_ACTIVE and _hw_handle:
from psychopy.iohub.client import ioHubConnection
conn = ioHubConnection.ACTIVE_CONNECTION
conn.unregisterWindowHandles(_hw_handle)
except Exception:
pass
def setFullScr(self, value):
"""Sets the window to/from full-screen mode"""
self.winHandle.set_fullscreen(value)
def _onResize(width, height):
"""A default resize event handler.
This default handler updates the GL viewport to cover the entire
window and sets the ``GL_PROJECTION`` matrix to be orthogonal in
window space. The bottom-left corner is (0, 0) and the top-right
corner is the width and height of the :class:`~psychopy.visual.Window`
in pixels.
Override this event handler with your own to create another
projection, for example in perspective.
"""
global retinaContext
if height == 0:
height = 1
if retinaContext is not None:
view = retinaContext.view()
bounds = view.convertRectToBacking_(view.bounds()).size
back_width, back_height = (int(bounds.width), int(bounds.height))
else:
back_width, back_height = width, height
GL.glViewport(0, 0, back_width, back_height)
GL.glMatrixMode(GL.GL_PROJECTION)
GL.glLoadIdentity()
GL.glOrtho(-1, 1, -1, 1, -1, 1)
# GL.gluPerspective(90, 1.0 * width / height, 0.1, 100.0)
GL.glMatrixMode(GL.GL_MODELVIEW)
GL.glLoadIdentity()
|
hoechenberger/psychopy
|
psychopy/visual/backends/pygletbackend.py
|
Python
|
gpl-3.0
| 16,713
|
# -*- coding: utf-8 -*-
# Project : LM4paper
# Created by igor on 17-3-14
import os
import sys
import time
import json
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from mixlm.lm_train import *
from mixlm.clstmdnn import CLSTMDNN
from bmlm.common import CheckpointLoader
def load_from_checkpoint(saver, logdir):
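    # Restore the most recent checkpoint found in `logdir` into the default
    # session; returns True on success, False if no checkpoint is available.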
sess = tf.get_default_session()
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt and ckpt.model_checkpoint_path:
if os.path.isabs(ckpt.model_checkpoint_path):
saver.restore(sess, ckpt.model_checkpoint_path)
else:
saver.restore(sess, os.path.join(logdir, ckpt.model_checkpoint_path))
return True
return False
class Model():
def __init__(self, logdir):
hps = CLSTMDNN.get_default_hparams().parse(FLAGS.hpconfig)
hps.num_gpus = FLAGS.num_gpus
hps.batch_size = 1
self.word_vocab = Vocabulary.from_file(os.path.join(FLAGS.vocabdir, "1b_word_vocab.txt"))
self.char_vocab = Vocabulary.from_file(os.path.join(FLAGS.vocabdir, "1b_char_vocab.txt"))
with tf.variable_scope("model"):
hps.num_sampled = 0
hps.keep_prob = 1.0
self.model = CLSTMDNN(hps, "eval", "/cpu:0")
if hps.average_params:
print("Averaging parameters for evaluation.")
self.saver = tf.train.Saver(self.model.avg_dict)
else:
self.saver = tf.train.Saver()
        # Limit the session's thread usage for the evaluation
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=20,
inter_op_parallelism_threads=1)
self.sess = tf.Session(config=config)
with self.sess.as_default():
if load_from_checkpoint(self.saver, logdir):
global_step = self.model.global_step.eval()
print("Successfully loaded model at step=%s." % global_step)
else:
print("Can't restore model from %s" % logdir)
self.hps = hps
def get_char_embedding(self, char):
id = self.char_vocab.get_id(char)
x = np.zeros(shape=(4, 20, 16))
x[:, :, :] = id
vector = self.sess.run([self.model.char_embedding.outputs],
feed_dict={self.model.char_x: x})
# print(self.model.char_embedding)
return vector[0][0][0]
def get_word_embedding(self, word):
id = self.word_vocab.get_id(word)
x = np.zeros(shape=(4, 20))
x[:, :] = id
vector = self.sess.run([self.model.word_embedding.outputs],
feed_dict={self.model.word_x: x})
return vector[0][0][0]
def visualize_char(model, path="/home/aegis/igor/LM4paper/tests/textchar.txt"):
chars = open(path, 'r').read().splitlines()
embedding = np.empty(shape=(len(chars), model.hps.emb_char_size), dtype=np.float32)
for i, char in enumerate(chars):
embedding[i] = model.get_char_embedding(char)
print(embedding)
print(embedding.shape)
logdir = "/data/visualog/char/"
metadata = os.path.join(logdir, "metadata.tsv")
with open(metadata, "w") as metadata_file:
for c in chars:
metadata_file.write("%s\n" % c)
tf.reset_default_graph()
with tf.Session() as sess:
X = tf.Variable([0.0], name='embedding')
place = tf.placeholder(tf.float32, shape=embedding.shape)
set_x = tf.assign(X, place, validate_shape=False)
sess.run(tf.global_variables_initializer())
sess.run(set_x, feed_dict={place: embedding})
saver = tf.train.Saver([X])
saver.save(sess, os.path.join(logdir, 'char.ckpt'))
config = projector.ProjectorConfig()
# One can add multiple embeddings.
embedding = config.embeddings.add()
embedding.tensor_name = X.name
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = metadata
# Saves a config file that TensorBoard will read during startup.
projector.visualize_embeddings(tf.summary.FileWriter(logdir), config)
def visualize_word(model, path="/home/aegis/igor/LM4paper/tests/testdata.txt"):
words = open(path, 'r').read().splitlines()
embedding = np.empty(shape=(len(words), model.hps.emb_word_size), dtype=np.float32)
for i, w in enumerate(words):
embedding[i] = model.get_word_embedding(w)
print(embedding)
print(embedding.shape)
logdir = "/data/visualog/word/"
metadata = os.path.join(logdir, "metadata.tsv")
with open(metadata, "w") as metadata_file:
for w in words:
metadata_file.write("%s\n" % w)
tf.reset_default_graph()
with tf.Session() as sess:
X = tf.Variable([0.0], name='embedding')
place = tf.placeholder(tf.float32, shape=embedding.shape)
set_x = tf.assign(X, place, validate_shape=False)
sess.run(tf.global_variables_initializer())
sess.run(set_x, feed_dict={place: embedding})
saver = tf.train.Saver([X])
saver.save(sess, os.path.join(logdir, 'word.ckpt'))
config = projector.ProjectorConfig()
# One can add multiple embeddings.
embedding = config.embeddings.add()
embedding.tensor_name = X.name
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = metadata
# Saves a config file that TensorBoard will read during startup.
projector.visualize_embeddings(tf.summary.FileWriter(logdir), config)
if __name__ == '__main__':
model = Model(logdir="/data/lmlog/train")
# vector = model.get_word_embedding("hello")
# print(vector)
visualize_word(model, path="/home/aegis/igor/LM4paper/tests/testword.txt")
|
IgorWang/LM4paper
|
mixlm/visualize.py
|
Python
|
gpl-3.0
| 5,856
|
import os
from flask import Flask, url_for, request, render_template, jsonify, send_file
from werkzeug.utils import secure_filename
import deepchem as dc
import subprocess
from shutil import copyfile
import csv
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'static/')
DEEPCHEM_GUI = Flask('deepchem-gui', static_folder=STATIC_DIR,
static_url_path='/static',
template_folder=os.path.join(STATIC_DIR, 'deepchem-gui',
'templates')
)
UPLOAD_DIR = os.path.join(STATIC_DIR, "data/")
if not os.path.isdir(UPLOAD_DIR):
os.mkdir(UPLOAD_DIR)
print("Created data directory")
# serve ngl webapp clone
@DEEPCHEM_GUI.route('/')
def webapp():
return render_template('webapp.html')
# receive uploaded protein, ligand, SMILES and SMARTS files
@DEEPCHEM_GUI.route('/upload', methods=['POST'])
def upload():
if request.method == 'POST':
proteins = request.files.getlist('proteins')
ligands = request.files.getlist('ligands')
smiles = request.files.getlist('smiles')
smarts = request.files.getlist('smarts')
if proteins and ligands:
protein_fns = []
ligand_fns = []
for protein in proteins:
protein_fn = os.path.join(
UPLOAD_DIR,
secure_filename(protein.filename)
)
protein.save(protein_fn)
protein_fns.append(protein_fn)
for ligand in ligands:
ligand_fn = os.path.join(
UPLOAD_DIR,
secure_filename(ligand.filename)
)
ligand.save(ligand_fn)
ligand_fns.append(ligand_fn)
docking_result = dock(protein_fns, ligand_fns)
print(docking_result)
for i in range(len(protein_fns)):
for j in range(len(ligand_fns)):
protein_fn = docking_result[i][j]["protein"]
new_protein_fn = protein_fn.split("/")[-1]
copyfile(protein_fn, os.path.join(
UPLOAD_DIR, new_protein_fn))
docking_result[i][j]["protein"] = url_for(
'static', filename="data/" + new_protein_fn)
ligand_fn = docking_result[i][j]["ligand"]
new_ligand_fn = ligand_fn.split("/")[-1]
copyfile(ligand_fn,
os.path.join(UPLOAD_DIR, new_ligand_fn))
docking_result[i][j]["ligand"] = url_for(
'static', filename="data/" + new_ligand_fn)
return jsonify(docking_result)
elif smiles:
smiles = smiles[0]
smiles_fn = os.path.join(
UPLOAD_DIR,
secure_filename(smiles.filename)
)
smiles.save(smiles_fn)
csvfile = open(smiles_fn, 'r')
csvreader = csv.reader(csvfile, delimiter=',')
data = []
for row in csvreader:
data.append(row)
data = render_smiles(data)
return jsonify(data)
elif smarts:
smarts = smarts[0]
smarts_fn = os.path.join(
UPLOAD_DIR,
secure_filename(smarts.filename)
)
smarts.save(smarts_fn)
csvfile = open(smarts_fn, 'r')
csvreader = csv.reader(csvfile, delimiter=',')
data = []
for row in csvreader:
data.append(row)
data = render_smarts(data)
return jsonify(data)
else:
return jsonify(error_msg="Invalid file transfer.")
else:
raise NotImplementedError
def render_smiles(data):
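    # Locate the SMILES column, render each molecule to a PNG with RDKit and
    # append the image URL as a new column; rows that fail to parse are
    # marked "Invalid".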
smiles_col_idx = [j for j in range(len(data[0])) if data[0][j]=="SMILES"][0]
for i, row in enumerate(data):
if i==0:
data[i].append("SMILES IMG")
continue
try:
smiles_str = data[i][smiles_col_idx]
smiles = Chem.MolFromSmiles(smiles_str)
AllChem.Compute2DCoords(smiles)
smiles_fn = 'smiles_%d.png' % i
smiles_img = os.path.join(UPLOAD_DIR, smiles_fn)
Draw.MolToFile(smiles, smiles_img)
data[i].append(url_for('static', filename='data/' + smiles_fn))
except Exception as e:
print(e)
data[i].append("Invalid")
pass
return data
def render_smarts(data):
smarts_col_idx = [j for j in range(len(data[0])) if data[0][j]=="SMARTS"][0]
smiles_col_idx_1 = [j for j in range(len(data[0])) if data[0][j]=="SMILES_1"][0]
smiles_col_idx_2 = [j for j in range(len(data[0])) if data[0][j]=="SMILES_2"][0]
for i, row in enumerate(data):
if i==0:
data[i].append("PRODUCT")
data[i].append("SMILES_1 IMG")
data[i].append("SMILES_2 IMG")
data[i].append("PRODUCT IMG")
continue
try:
smarts_str = data[i][smarts_col_idx]
smiles_str_1 = data[i][smiles_col_idx_1]
smiles_str_2 = data[i][smiles_col_idx_2]
rxn = AllChem.ReactionFromSmarts(smarts_str)
ps = rxn.RunReactants((Chem.MolFromSmiles(smiles_str_1), Chem.MolFromSmiles(smiles_str_2)))
product = ps[0][0]
product_str = Chem.MolToSmiles(product)
data[i].append(product_str)
AllChem.Compute2DCoords(product)
product_fn = 'product_%d.png' % i
product_img = os.path.join(UPLOAD_DIR, product_fn)
Draw.MolToFile(product, product_img)
smiles_1 = Chem.MolFromSmiles(smiles_str_1)
AllChem.Compute2DCoords(smiles_1)
smiles_1_fn = 'smiles_1_%d.png' % i
smiles_1_img = os.path.join(UPLOAD_DIR, smiles_1_fn)
Draw.MolToFile(smiles_1, smiles_1_img)
smiles_2 = Chem.MolFromSmiles(smiles_str_2)
AllChem.Compute2DCoords(smiles_2)
smiles_2_fn = 'smiles_2_%d.png' % i
smiles_2_img = os.path.join(UPLOAD_DIR, smiles_2_fn)
Draw.MolToFile(smiles_2, smiles_2_img)
data[i].append(url_for('static', filename='data/' + product_fn))
data[i].append(url_for('static', filename='data/' + smiles_1_fn))
data[i].append(url_for('static', filename='data/' + smiles_2_fn))
except Exception as e:
print(e)
data[i].append("Invalid")
data[i].append("Invalid")
data[i].append("Invalid")
pass
return data
def dock(protein_fns, ligand_fns):
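    # Cross-dock every ligand against every protein with DeepChem's
    # VinaGridDNNDocker, collecting the score and docked structures per pair.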
docking_result = [[{} for j in range(len(ligand_fns))]
for i in range(len(protein_fns))]
for i in range(len(protein_fns)):
for j in range(len(ligand_fns)):
protein_fn = protein_fns[i]
ligand_fn = ligand_fns[j]
print("Docking: %s to %s" % (ligand_fn, protein_fn))
docker = dc.dock.VinaGridDNNDocker(
exhaustiveness=1, detect_pockets=False)
(score, (protein_docked, ligand_docked)
) = docker.dock(protein_fn, ligand_fn)
print("Scores: %f" % (score))
print("Docked protein: %s" % (protein_docked))
print("Docked ligand: %s" % (ligand_docked))
ligand_docked_fn = ligand_docked.replace(".pdbqt", "")
subprocess.call("csh %s %s" % (os.path.join(STATIC_DIR, 'deepchem-gui', 'scripts', 'stripqt.sh'),
ligand_docked_fn), shell=True)
ligand_docked_pdb = ligand_docked_fn + ".pdb"
docking_result[i][j] = {'score': score[
0], 'protein': protein_docked, 'ligand': ligand_docked_pdb}
return docking_result
|
deepchem/deepchem-gui
|
gui/app.py
|
Python
|
gpl-3.0
| 8,020
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""TODO"""
# This file is part of Linshare cli.
#
# LinShare cli is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LinShare cli is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LinShare cli. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2019 Frédéric MARTIN
#
# Contributors list :
#
# Frédéric MARTIN frederic.martin.fma@gmail.com
#
import copy
import json
from argparse import RawTextHelpFormatter
from linshareapi.cache import Time
from vhatable.cell import CellBuilder
from vhatable.cell import ComplexCell
from vhatable.cell import ComplexCellBuilder
from vhatable.filters import PartialOr
from linsharecli.user.core import DefaultCommand as Command
from linsharecli.common.core import add_list_parser_options
from linsharecli.common.cell import ActorCell
from linsharecli.common.cell import AuthUserCell
from linsharecli.common.tables import TableBuilder
class DefaultCommand(Command):
"""TODO"""
IDENTIFIER = "name"
MSG_RS_UPDATED = "The shared space member '%(account)s' (%(uuid)s) was successfully updated."
MSG_RS_CREATED = "The shared space member '%(account)s' (%(uuid)s) was successfully created."
CFG_DELETE_MODE = 1
CFG_DELETE_ARG_ATTR = "ss_uuid"
def complete(self, args, prefix):
super(DefaultCommand, self).__call__(args)
json_obj = self.ls.shared_spaces.list()
return (v.get(self.RESOURCE_IDENTIFIER)
for v in json_obj if v.get(self.RESOURCE_IDENTIFIER).startswith(prefix))
def complete_shared_spaces(self, args, prefix):
"""TODO"""
super(DefaultCommand, self).__call__(args)
json_obj = self.ls.shared_spaces.list()
return (v.get(self.RESOURCE_IDENTIFIER)
for v in json_obj if v.get(self.RESOURCE_IDENTIFIER).startswith(prefix))
class SharedSpaceCompleter(object):
"""TODO"""
# pylint: disable=too-few-public-methods
def __init__(self, config):
self.config = config
def __call__(self, prefix, **kwargs):
from argcomplete import debug
try:
debug("\n------------ SharedSpaceCompleter -----------------")
debug("Kwargs content :")
for i, j in list(kwargs.items()):
debug("key : " + str(i))
debug("\t - " + str(j))
debug("\n------------ SharedSpaceCompleter -----------------\n")
args = kwargs.get('parsed_args')
cmd = DefaultCommand(self.config)
return cmd.complete_shared_spaces(args, prefix)
# pylint: disable=broad-except
except Exception as ex:
debug("\nERROR:An exception was caught :" + str(ex) + "\n")
import traceback
traceback.print_exc()
debug("\n------\n")
            return ["complete-error"]
class ResourceCell(ComplexCell):
"""TODO"""
_format_filter = '{uuid}'
def __unicode__(self):
if self.raw:
return str(self.value)
if self.value is None:
return self.none
action = self.row['action']
resource_type = self.row['type']
fmt = 'Missing format. {raw}'
data = {}
data['action'] = action
data['raw'] = "?"
if self.extended:
fmt = 'Missing format.\n{raw}'
data['raw'] = json.dumps(
copy.deepcopy(self.value),
sort_keys=True, indent=2
)
if resource_type == "WORKGROUP":
if action == "CREATE":
fmt = 'New workGroup : {name} ({uuid:.8})'
data.update(self.value)
elif resource_type == "WORKGROUP_MEMBER":
if action == "CREATE":
fmt = 'New member : {name} ({uuid:.8})'
if self.vertical:
fmt = 'New member : {name} ({uuid})'
data.update(self.value['user'])
elif resource_type == "WORKGROUP_FOLDER":
if action == "CREATE":
fmt = 'New folder : {name} ({uuid:.8})'
if self.vertical:
fmt = 'New folder : {name} ({uuid})'
data.update(self.value)
elif resource_type == "WORKGROUP_DOCUMENT":
if action == "CREATE":
fmt = 'New document : {name} ({uuid:.8})'
if self.vertical:
fmt = 'New document : {name} ({uuid})'
data.update(self.value)
elif resource_type == "WORKGROUP_DOCUMENT_REVISION":
if action == "CREATE":
fmt = 'New version : {name} ({uuid:.8})'
if self.vertical:
fmt = 'New version : {name} ({uuid})'
data.update(self.value)
return fmt.format(**data)
class ListCommand(Command):
    """List all shared space audit traces."""
IDENTIFIER = "creationDate"
RESOURCE_IDENTIFIER = "uuid"
@Time('linsharecli.shared_spaces.audit', label='Global time : %(time)s')
def __call__(self, args):
super(ListCommand, self).__call__(args)
endpoint = self.ls.shared_spaces.audit
tbu = TableBuilder(self.ls, endpoint, self.DEFAULT_SORT)
tbu.load_args(args)
tbu.add_filters(
PartialOr(self.IDENTIFIER, args.identifiers, True),
PartialOr(self.RESOURCE_IDENTIFIER, args.uuids, True, match_raw=True),
PartialOr("resource", [args.resource], True, match_raw=False),
)
tbu.add_custom_cell("actor", ActorCell)
tbu.add_custom_cell("authUser", AuthUserCell)
tbu.add_custom_cell("uuid", CellBuilder('{value:.8}', '{value}'))
tbu.add_custom_cell("resource", ResourceCell)
tbu.add_custom_cell(
"workGroup",
ComplexCellBuilder(
'{name}\n({uuid:.8})',
'{name} ({uuid:})',
'{name}',
)
)
table = tbu.build().load_v2(endpoint.list(args.ss_uuid))
table.align['resource'] = "l"
return table.render()
def complete_fields(self, args, prefix):
"""TODO"""
# pylint: disable=unused-argument
super(ListCommand, self).__call__(args)
cli = self.ls.shared_spaces.audit
return cli.get_rbu().get_keys(True)
def add_parser(subparsers, name, desc, config):
"""TODO"""
parser_tmp = subparsers.add_parser(name, help=desc)
parser_tmp.add_argument(
'ss_uuid',
help="shared_space uuid"
).completer = SharedSpaceCompleter(config)
subparsers2 = parser_tmp.add_subparsers()
# command : list
parser = subparsers2.add_parser(
'list',
formatter_class=RawTextHelpFormatter,
help="list shared space audit traces")
parser.add_argument('identifiers', nargs="*", help="filter by fragments of date")
parser.add_argument('-u', '--uuid', dest="uuids", action="append",
help="Filter by uuid fragments.")
parser.add_argument('-e', '--resource', action="store",
help="Filter by resource uuid")
add_list_parser_options(parser, cdate=True)
parser.set_defaults(__func__=ListCommand(config))
|
fred49/linshare-cli
|
linsharecli/user/shared_space_audit.py
|
Python
|
gpl-3.0
| 7,635
|
#!/bin/python2
import os, gzip, StringIO, time, csv, datetime
from flask import Flask, request, redirect, url_for, render_template
from werkzeug.utils import secure_filename
from wtforms import Form, DecimalField, validators
class UpdateForm(Form):
weight = DecimalField('Weight', [validators.DataRequired()])
fat = DecimalField('Body fat', [validators.DataRequired()])
water = DecimalField('Body water', [validators.DataRequired()])
muscle = DecimalField('Muscle', [validators.DataRequired()])
bonemass = DecimalField('Bone mass', [validators.DataRequired()])
calories = DecimalField('Calories', [validators.DataRequired()])
app = Flask(__name__)
CSV_FILENAME = "smartscale.csv"
types = ["weight", "fat", "water", "muscle", "bonemass", "calories"]
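# Each CSV row stores these values in this order, followed by a timestamp
# column appended by updateData().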
@app.route('/stats')
def stats():
with open(CSV_FILENAME) as f:
csv = f.read().splitlines()
data = [(line.split(',')) for line in csv]
return render_template('chart.html', types=types, values=data)
def updateData(data):
values = []
for t in types:
values.append(str(data[t]))
filename = CSV_FILENAME
timestr = time.strftime("%Y%m%d-%H%M%S")
with open(filename, "a") as fh:
fh.write(','.join(values) + "," + timestr + "\n")
@app.route('/', methods=['GET', 'POST'])
def update():
form = UpdateForm(request.form)
if request.method == 'POST' and form.validate():
updateData(form.data)
return redirect('stats')
return render_template('update.html', form=form)
if __name__ == "__main__":
app.run(host='0.0.0.0')
|
bossen/smartscale
|
main.py
|
Python
|
gpl-3.0
| 1,578
|
#!/usr/bin/env python3
# Copyright (C) 2017
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
"""
resourcetool.py
Simple utility to list or update RADB resource availability values.
Essentially a tool around RADB getResources(), updateResourceAvailability(), getResourceClaims() and (parts of) updateResourceClaims().
Can also figure out available capacity for a mounted storage resource and update it in the RADB (-U/--update-available-storage-capacity option).
Can also update storage claim endtime to its task endtime (if ended) in the RADB (-E/--end-past-tasks-storage-claims option).
Examples (source lofarinit.sh to set LOFARROOT, PYTHONPATH, ...):
- Update available (local) storage capacity and set storage claim endtimes to task endtimes (if ended) for an observation storage node, e.g. via cron in operations:
source /opt/lofar/lofarinit.sh; LOFARENV=PRODUCTION /opt/lofar/bin/resourcetool --broker=scu001.control.lofar --end-past-tasks-storage-claims --update-available-storage-capacity
- Show all DRAGNET resources on the test system RADB:
LOFARENV=TEST resourcetool --broker=scu199.control.lofar --resource-group-root=DRAGNET
- Deactivate 2 storage resources in operations, because disks from both storage areas are found to be faulty (then still need to re-schedule tasks):
LOFARENV=PRODUCTION resourcetool --broker=scu001.control.lofar drg01_storage:/data1=False drg01_storage:/data2=False
NOTES:
! Be careful what system (operations or test) this command applies to! This can be set using the env vars LOFARENV=TEST or LOFARENV=PRODUCTION
Operations vs Test (vs Development) can be seen from the default RADB_BUSNAME in the usage info: lofar.* vs test.lofar.* vs devel.lofar.*
! By default, listed or updateable resources are restricted to resources under the localhost's resource group.
This is on purpose to make -U work correctly. The -G/--resource-group-root option can be used to widen the resource group scope for listing
or explicit command-line updates, but non-default -G with -U is rejected: it is too easy to mass-update other resources with local filesystem info.
"""
import logging
from datetime import datetime, timedelta
from lofar.messaging import DEFAULT_BROKER, DEFAULT_BUSNAME
from lofar.sas.resourceassignment.resourceassignmentservice.rpc import RADBRPC
from lofar.common.util import humanreadablesize
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.WARN)
logger = logging.getLogger(__name__)
def printResources(resources, scaled_units=True):
""" E.g.: resources = [{u'total_capacity': 3774873600, u'name': u'dragproc_bandwidth:/data', u'type_id': 3,
u'available_capacity': 3774873600, u'type_name': u'bandwidth', u'unit_id': 3,
u'active': True, u'used_capacity': 0, u'id': 118, u'unit': u'bits/second',
'claimable_capacity': 3774873600}, ...] # this key was added (not from RADB); it can be negative!
"""
header = {'id': 'RId', 'name': 'Resource Name', 'active': 'Active',
'available_capacity': ' Avail. Capacity', 'claimable_capacity': ' Claimable Cap.',
'total_capacity': ' Total Capacity', 'unit': 'Unit'}
print(('{id:4s} {name:24s} {active:6s} {available_capacity} {claimable_capacity} {total_capacity} {unit}'.format(**header)))
print('===================================================================================================')
resources.sort(key=lambda r: r['id']) # SQL could have done this better
for res in resources:
res['active'] = 'True' if res['active'] else 'False' # to solve bool formatting issue
if scaled_units and (res['type_name'] == 'storage' or res['type_name'] == 'bandwidth'):
unit_base = 1024 if res['type_name'] == 'storage' else 1000 # check type_name instead of unit as in printClaims()
res['available_capacity'] = humanreadablesize(res['available_capacity'], '', unit_base)
res['claimable_capacity'] = humanreadablesize(res['claimable_capacity'], '', unit_base)
res['total_capacity'] = humanreadablesize(res['total_capacity'] , '', unit_base)
cap_conv = '>16s'
else:
cap_conv = '16d'
print((('{id:4d} {name:24s} {active:6s} {available_capacity:' + cap_conv +
'} {claimable_capacity:' + cap_conv + '} {total_capacity:' + cap_conv + '} {unit}').format(**res)))
if not resources:
print('<no resources>')
def printClaims(claims, scaled_units=True):
""" E.g.: claims = [{u'claim_size': 76441190400, u'endtime': datetime.datetime(2018, 6, 13, 17, 40),
u'id': 67420, u'resource_id': 122, u'resource_name': u'drg01_storage:/data1',
u'resource_type_id': 5, u'resource_type_name': u'storage',
u'starttime': datetime.datetime(2017, 6, 13, 17, 30),
u'status': u'claimed', u'status_id': 1, u'task_id': 75409, ...}, ...]
"""
header = {'id': 'ClId', 'resource_name': 'Resource Name', 'starttime': 'Start Time', 'endtime': 'End Time',
'claim_size': 'Claim Size', 'status': 'Status'}
print(('{id:7s} {resource_name:24s} {starttime:19s} {endtime:19s} {claim_size:16s} {status:8s}'.format(**header)))
print('===================================================================================================')
claims.sort(key=lambda c: c['id']) # secondary sorting key; SQL could have done this better
claims.sort(key=lambda c: c['starttime']) # primary sorting key (stable sort)
for claim in claims:
if scaled_units and (claim['resource_type_name'] == 'storage' or claim['resource_type_name'] == 'bandwidth'):
unit_base = 1024 if claim['resource_type_name'] == 'storage' else 1000 # no unit name here, so check type_name
claim['claim_size'] = humanreadablesize(claim['claim_size'], '', unit_base)
size_conv = '>16s'
else:
size_conv = '16d'
print((('{id:7d} {resource_name:24s} {starttime} {endtime} {claim_size:' + size_conv +
'} {status:8s}').format(**claim)))
if not claims:
print('<no claims on specified resources and time range>')
def updateStorageClaimsEndTime(radb, resources, storage_resource_type_id, lower_bound=None, upper_bound=None):
""" Update storage claims on resources in the RADB that currently apply, but the task
they belong to has ended (+ a short while). Set end time of these claims to task endtime.
This is intended for user clusters (e.g. DRAGNET) that do not auto-terminate storage claims on
cleanup. If users manage clean up autonomously, then they manage all storage accounting themselves.
"""
status = 0
resource_ids = [res['id'] for res in resources]
now = datetime.utcnow()
if lower_bound is None:
lower_bound = now
if upper_bound is None:
upper_bound = now
claims = radb.getResourceClaims(lower_bound=lower_bound, upper_bound=upper_bound,
resource_ids=resource_ids,
resource_type=storage_resource_type_id)
# Get associated tasks for their end times. Update claims for tasks that ended.
task_ids = list(set({claim['task_id'] for claim in claims}))
tasks = radb.getTasks(task_ids=task_ids)
for task in tasks:
# Wait until task ended. Do not race with OTDBtoRATaskStatusPropagator that extends storage claim endtime.
# We effectively undo that extension here. Intended for clusters (e.g. DRAGNET) where end users manage storage.
new_endtime = task['endtime']
if now < new_endtime + timedelta(minutes=1):
continue
claim_ids = [claim['id'] for claim in claims if claim['task_id'] == task['id'] and \
claim['endtime'] > new_endtime]
print(("Updating RADB storage claims {} endtime to {}".format(claim_ids, new_endtime)))
updated_dict = radb.updateResourceClaims(where_resource_claim_ids=claim_ids, endtime=new_endtime)
if not updated_dict['updated']:
logger.error('failed to update RADB storage claims') # why is N/A here; check the RA logs
status = 1
return status
def updateResource(radb, resource):
""" Update the RADB using the resource dict. """
print(("Updating RADB with resource {}".format(resource)))
updated_dict = radb.updateResourceAvailability(resource_id=resource['id'], active=resource['active'],
available_capacity=resource['available_capacity'],
total_capacity=resource['total_capacity'])
if not updated_dict['updated']:
logger.error('failed to update RADB resource') # why is N/A here; check the RA logs
return 1
return 0
def getMountPoint(resource_name):
""" E.g. with resource_name 'CEP4_storage:/data' or 'drg15_bandwidth:/data2' or 'CS002bw0',
this function returns: '/data' or '/data2' or None.
"""
sep_idx = resource_name.find(':/') # mount point must be an abs path
if sep_idx == -1:
return None
return resource_name[sep_idx + 1 : ]
def updateAvailableStorageCapacities(radb, resources):
import os
status = 0
for res in resources:
# All storage resource names are supposedly mount points.
# But do not update with the wrong partition info (sys maintenance?).
# Log error and let admin figure it out. RADB resource defaults may need updating too.
mount_pt = getMountPoint(res['name'])
if mount_pt is None or not os.path.ismount(mount_pt):
logger.error("skipped updating available capacity of resource '{}': its path is not a mount point on this system".format(res['name']))
status = 1
continue
# Retrieve avail capacity from filesystem and do some checks.
try:
st = os.statvfs(mount_pt)
except OSError as e:
logger.error('statvfs: ' + str(e))
status = 1
continue
avail_cap = st.f_bavail * st.f_frsize
total_cap = st.f_blocks * st.f_frsize
if total_cap != res['total_capacity']:
hint_arg = res['name'] + '=' + str(avail_cap) + ',' + str(total_cap)
logger.warn("total capacity for resource '{}' is {}, which is not equal to {} as listed in the RADB. If the total capacity has changed permanently, please update the RADB, e.g. by running this program passing: {} (and by updating the software repo for RADB reinstalls).".format(res['name'], total_cap, res['total_capacity'], hint_arg))
if avail_cap > res['total_capacity']:
logger.error("the detected available capacity for resource '{}' cannot be written to the RADB, because it is greater than the total capacity listed in the RADB.")
status = 1
continue
# Only update available capacity in the RADB.
# Total and active indicate a config change (or maintenance in progress). Leave that for an admin.
res_update = {'id': res['id'], 'available_capacity': avail_cap,
'total_capacity': None, 'active': None}
status |= updateResource(radb, res_update)
return status
def updateSpecifiedCapacities(radb, resources, resource_updates):
status = 0
for res_update in resource_updates:
# Need resource id from name to apply the update. Also check avail <= total.
try:
res = next((res for res in resources if res['name'] == res_update['name']))
except StopIteration:
logger.error("skipped updating resource '{}': name unknown. Correct the name or (correct the) use (of) the -G/--resource-group-root option to widen the resource scope, e.g. -G CEP4|DRAGNET|LOFAR".format(res_update['name']))
status = 1
continue
if res_update['available_capacity'] is not None and \
res_update['total_capacity'] is None and \
res_update['available_capacity'] > res['total_capacity']:
logger.error("skipped updating resource '{}': specified available capacity cannot be greater than total capacity listed in the RADB. If the total capacity has changed permanently, please update the RADB using this program (and by updating the software repo for RADB reinstalls)".format(res_update['name']))
status = 1
continue
res_update['id'] = res['id']
status |= updateResource(radb, res_update)
return status
def getResourceGroupIdByName(db_rgp2rgp, name):
""" Returns group id of resource group named name, or None if name was not found.
The search happens breadth-first.
"""
# find root group(s): empty parent list
gids = [gid for gid, group in list(db_rgp2rgp.items()) if not group['parent_ids']]
i = 0
while i < len(gids): # careful iterating while modifying
res_group = db_rgp2rgp[gids[i]]
if res_group['resource_group_name'] == name:
return gids[i]
gids.extend(res_group['child_ids'])
i += 1
return None
def getSubtreeResourceIdList(db_rgp2rgp, root_gid):
""" Returns list of resource ids in resource group root_gid and its (grand)children."""
# Search breadth-first starting at root_gid.
gids = [root_gid]
resource_id_list = []
i = 0
while i < len(gids): # careful iterating while modifying
res_group = db_rgp2rgp[gids[i]]
resource_id_list.extend(res_group['resource_ids'])
gids.extend(res_group['child_ids'])
i += 1
return resource_id_list
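# Illustrative sketch of the two helpers above (made-up group ids, not real RADB content):
#   db_rgp2rgp = {0: {'resource_group_name': 'LOFAR', 'parent_ids': [], 'child_ids': [1], 'resource_ids': [10]},
#                 1: {'resource_group_name': 'DRAGNET', 'parent_ids': [0], 'child_ids': [], 'resource_ids': [11, 12]}}
#   getResourceGroupIdByName(db_rgp2rgp, 'DRAGNET')  ->  1
#   getSubtreeResourceIdList(db_rgp2rgp, 0)          ->  [10, 11, 12]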
def parseResourceArg(arg):
""" Return dict parsed from arg str. Arg format: resource_name:/data=True,100,200
with any value optional after the '=' (but need at least one).
Any returned dict value but the resource name may be None.
On error ValueError is raised.
"""
eq_idx = arg.find('=')
if eq_idx == -1:
raise ValueError("could not find '=' in argument; need e.g. res_name=100 or resource_name=True,100,200")
resource_name = arg[ : eq_idx]
if not resource_name:
raise ValueError("invalid resource name in argument before '='; need e.g. res_name=100 or resource_name=True,100,200")
resource_val = arg[eq_idx + 1 : ]
vals = resource_val.split(',')
if not vals or len(vals) > 3:
raise ValueError("need 1-3 argument value(s) after '=', e.g. res_name=100 or resource_name=True,100,200")
active = None
avail_cap = None
total_cap = None
for val in vals:
if val == 'True' or val == 'False':
if active is not None:
raise ValueError("accepting at most 1 bool as resource active value in argument")
active = True if val == 'True' else False
continue
if total_cap is not None:
raise ValueError("accepting at most 2 ints as resource available and total capacities in argument")
v = int(val)
if v < 0:
raise ValueError("capacity value must be positive")
if avail_cap is None:
avail_cap = v
else:
if v < avail_cap:
raise ValueError("specified available capacity cannot be greater than specified total capacity")
total_cap = v
return {'name': resource_name, 'active': active,
'available_capacity': avail_cap, 'total_capacity': total_cap}
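# For example (hypothetical resource name, values not taken from any real RADB):
#   parseResourceArg('drg01_storage:/data1=True,100,200')
#   ->  {'name': 'drg01_storage:/data1', 'active': True,
#        'available_capacity': 100, 'total_capacity': 200}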
def parseTimestamps(datetime_fmt, timestamps):
""" Return list of None or datetime objects representing timestamps. Raise ValueError on parse error.
Use datetime_fmt as the strptime() format str. A timestamp value may also be 'now' (UTC) or 'None'.
"""
# Parsing datetime strings could be done by extending optparse's Option class, but this works well enough
rv = []
now = None
for ts in timestamps:
if ts is None or ts == 'now':
if now is None:
now = datetime.utcnow()
ts = now
elif ts == 'None':
ts = None
else:
ts = datetime.strptime(ts, datetime_fmt)
rv.append(ts)
return rv
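# For example (hypothetical call, using the same datetime_fmt as parseArgs() below):
#   parseTimestamps('%Y-%m-%d %H:%M:%S', ['2017-06-13 17:30:00', 'now', 'None'])
#   ->  [datetime(2017, 6, 13, 17, 30), <datetime.utcnow() at call time>, None]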
def parseArgs(args):
from socket import gethostname
hostname = gethostname()
from optparse import OptionParser
usage = 'Usage: %prog [OPTIONS] [resource_name=available_capacity]... or [resource_name=True|False[,avail_cap[,total_cap]]]...'
descr = 'List or update LOFAR RADB resource availability and/or available/total capacities'
parser = OptionParser(usage=usage, description=descr)
# already supported options: -h, --help, --
parser.add_option('-q', '--broker', dest='broker', default=DEFAULT_BROKER,
help='qpid broker hostname (default: %default).')
parser.add_option('--busname', dest='busname', default=DEFAULT_BUSNAME,
help='Name of the bus for all messaging operations (default: %default)')
parser.add_option('-G', '--resource-group-root', dest='resource_group_root', default=hostname,
help='Only consider resources under resource group root (default: this hostname: \'%default\' (all=LOFAR))')
parser.add_option('-t', '--resource-type', dest='resource_type', default=None,
help='Only consider resources of this type (e.g. storage, bandwidth, rsp, rcu, ...)')
parser.add_option('-E', '--end-past-tasks-storage-claims', dest='end_storage_claims', action='store_true', default=False,
help='WARNING: USE THIS OPTION ONLY FOR DRAGNET!. Set end time to task stoptime for storage claims under --resource-group-root for completed tasks. Implies -t storage. Can be limited to timerange given by -T and -S.')
parser.add_option('-U', '--update-available-storage-capacity', dest='update_avail', action='store_true', default=False,
help='Update the available capacity value in the RADB of storage resources under --resource-group-root. Implies -t storage. Not affected by -T and -S.')
datetime_fmt = '%Y-%m-%d %H:%M:%S'
parser.add_option('-T', '--timestart', dest='timestart',
help='lower bound UTC timestamp \'{}\' or \'now\' or \'None\' for resource claims (default: now)'.format(datetime_fmt))
parser.add_option('-S', '--timestop', dest='timestop',
help='upper bound UTC timestamp \'{}\' or \'now\' or \'None\' for resource claims (default: now)'.format(datetime_fmt))
parser.add_option('--no-scaled-units', dest='no_scaled_units', action='store_true', default=False,
help='Print raw instead of scaled units for some sizes, e.g. 1048576 instead of 1M')
options, left_over_args = parser.parse_args(args)
if options.update_avail and options.resource_group_root != hostname:
parser.error("combining the option -U with a non-default -G is rejected: it is too easy to mass-update the wrong resources")
if options.end_storage_claims or options.update_avail:
if options.resource_type is None:
options.resource_type = 'storage'
elif options.resource_type != 'storage':
parser.error("the options -E or -U cannot be combined with -t {}, because -E and -U are about storage only".format(options.resource_type))
try:
timestamps = parseTimestamps(datetime_fmt, (options.timestart, options.timestop))
except ValueError as exc:
parser.error("timestamp arguments: " + str(exc))
options.timestart = timestamps[0]
options.timestop = timestamps[1]
if options.timestart is not None and options.timestop is not None and options.timestart > options.timestop:
parser.error("-T/--timestart option value may not be after -S/--timestop option value")
resource_updates = []
for i, arg in enumerate(left_over_args):
try:
resource_updates.append(parseResourceArg(arg))
except ValueError as exc:
parser.error("failed to parse non-option argument '{}': {}".format(i, exc))
return options, resource_updates, parser.print_help
def main(args):
import os
os.environ['TZ'] = 'UTC' # LOFAR observatory software uses UTC
options, resource_updates, print_help_func = parseArgs(args)
status = 0
radb = None
try:
radb = RADBRPC.create(exchange=options.busname, broker=options.broker)
db_resource_list = radb.getResources(resource_types=options.resource_type, include_availability=True)
if options.timestart is None:
options.timestart = datetime(1970, 1, 1)
if options.timestop is None:
options.timestop = datetime(2100, 1, 1)
# Filter resource list via resource root group option
db_resource_group_mships = radb.getResourceGroupMemberships()
db_rgp2rgp = db_resource_group_mships['groups'] # resource-group-to-resource-group relations
group_id = getResourceGroupIdByName(db_rgp2rgp, options.resource_group_root)
if group_id is None:
print_help_func()
print("")
logger.error("could not find resource group '{}'. You may want to (correct the) use (of) the -G/--resource-group-root option to widen the resource scope, e.g. -G CEP4|DRAGNET|LOFAR".format(options.resource_group_root))
return 1
resource_id_list = getSubtreeResourceIdList(db_rgp2rgp, group_id)
if not resource_id_list:
print_help_func()
print("")
logger.error("no resources found under resource group '{}' and its (grand)children".format(options.resource_group_root))
return 1
resources = [res for res in db_resource_list if res['id'] in resource_id_list] # SQL could have done this better
if options.end_storage_claims:
try:
storage_resource_type_id = next((res['type_id'] for res in resources))
except StopIteration:
print_help_func()
print("")
logger.error("-E/--end-past-tasks-storage-claims used, but no storage resources found under resource group '{}' and its (grand)children".format(options.resource_group_root))
return 1
status |= updateStorageClaimsEndTime(radb, resources, storage_resource_type_id, lower_bound=options.timestart, upper_bound=options.timestop)
if options.update_avail:
status |= updateAvailableStorageCapacities(radb, resources)
if resource_updates:
status |= updateSpecifiedCapacities(radb, resources, resource_updates)
# If no specific action requested, print list of resources and claims
if not options.end_storage_claims and not options.update_avail and not resource_updates:
resource_ids = [res['id'] for res in resources]
claims = radb.getResourceClaims(lower_bound=options.timestart, upper_bound=options.timestop,
resource_ids=resource_ids, extended=True)
# A small downside of querying RADB again is that the claimable capacities might be inconsistent with claims just retrieved.
# We could derive it ourselves or stick it in a transaction, but this is good enough for the overview.
for res in resources:
res['claimable_capacity'] = radb.get_resource_claimable_capacity(resource_id=res['id'],
lower_bound=options.timestart,
upper_bound=options.timestop)
printResources(resources, not options.no_scaled_units)
print("")
printClaims(claims, not options.no_scaled_units)
#except Exception: # disabled: prefer default stacktrace on bug here
finally:
if radb is not None:
radb.close()
return status
if __name__ == '__main__':
from sys import argv, exit
exit(main(argv[1:]))
|
kernsuite-debian/lofar
|
SAS/DataManagement/ResourceTool/resourcetool.py
|
Python
|
gpl-3.0
| 24,976
|
"""
Python implementation of the matrix information measurement examples from the
StackExchange answer written by WilliamAHuber for
"Measuring entropy/ information/ patterns of a 2d binary matrix"
http://stats.stackexchange.com/a/17556/43909
Copyright 2014 Cosmo Harrigan
This program is free software, distributed under the terms of the GNU LGPL v3.0
"""
__author__ = 'Cosmo Harrigan'
from matplotlib import pyplot
from neighborhood_functions import avg_components
from moving_window_filter import moving_window_filter
from calculate_profile import profile
# Function to apply
F = avg_components
# Define the matrices as input_matrices
from data import *
# Iterate over the input matrices
for m in range(0, len(input_matrices)):
active_matrix = input_matrices[m]
print("---------\nMatrix #{0}\n---------\n".format(m))
# Produce the filtered matrices at varying scales and the associated
# entropy "profiles"
matrices = []
for n in range(1, min(active_matrix.shape)):
output_matrix = moving_window_filter(matrix=active_matrix,
f=F,
neighborhood_size=n)
matrices.append(output_matrix)
subplot = pyplot.subplot(5, 4, m * 4 + n)
pyplot.axis('off')
pyplot.imshow(output_matrix,
interpolation='nearest',
cmap='Greys_r',
vmin=0,
vmax=1)
print("Neighborhood size = {0}\n{1}\n".format(n, output_matrix))
print("Profile:\n{0}\n".format(profile(matrices)))
pyplot.show()
|
cosmoharrigan/matrix-entropy
|
main.py
|
Python
|
gpl-3.0
| 1,624
|
#! /usr/bin/env python
# DBus to turn USB on or off (by unbinding the driver)
# The System D-bus
import dbus
import dbus.service
from gi.repository import GLib
from dbus.mainloop.glib import DBusGMainLoop
from usb_inhibit import USB_inhibit
class USB_Service_Blocker(dbus.service.Object):
inhibitor_work = False
def __init__(self):
self.usb_monitor = USB_inhibit(True)
bus_name = dbus.service.BusName('org.gnome.USBBlocker', bus=dbus.SystemBus())
dbus.service.Object.__init__(self, bus_name, '/org/gnome/USBBlocker')
@dbus.service.method(dbus_interface='org.gnome.USBBlocker.inhibit', \
in_signature='', out_signature='b')
def get_status(self):
return self.inhibitor_work
@dbus.service.method(dbus_interface='org.gnome.USBBlocker.inhibit')
def start(self):
print("Start monitoring Dbus system message")
if not self.inhibitor_work:
self.usb_monitor.start()
self.inhibitor_work = True
@dbus.service.method(dbus_interface='org.gnome.USBBlocker.inhibit')
def stop(self):
print("Stop monitoring Dbus system message")
if self.inhibitor_work:
self.usb_monitor.stop()
self.inhibitor_work = False
@dbus.service.method(dbus_interface='org.gnome.USBBlocker.device',
in_signature='ss', out_signature='b')
def enable_device(self, bus_id, dev_id):
print (bus_id)
print (dev_id)
import time; time.sleep(0.03)
return self.usb_monitor.bind_driver(bus_id, dev_id)
DBusGMainLoop(set_as_default=True)
dbus_service = USB_Service_Blocker()
mainloop = GLib.MainLoop()
try:
mainloop.run()
except KeyboardInterrupt:
print("\nThe MainLoop will close...")
mainloop.quit()
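# A minimal client-side sketch (interface and object names taken from the service above;
# assumes the service is already running on the system bus):
#   import dbus
#   bus = dbus.SystemBus()
#   blocker = bus.get_object('org.gnome.USBBlocker', '/org/gnome/USBBlocker')
#   print(blocker.get_status(dbus_interface='org.gnome.USBBlocker.inhibit'))
#   blocker.start(dbus_interface='org.gnome.USBBlocker.inhibit')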
|
murarugeorgec/USB-checking
|
USB/USB_DBus/usb_dbus_system.py
|
Python
|
gpl-3.0
| 1,795
|
import argparse
import ui.output
def help_format_cloudcredgrab(prog):
kwargs = dict()
kwargs['width'] = ui.output.columns()
kwargs['max_help_position'] = 34
    formatter = argparse.HelpFormatter(prog, **kwargs)
    return formatter
def parse(args):
parser = argparse.ArgumentParser(prog="cloudcredgrab", add_help=False, usage=argparse.SUPPRESS)
parser.formatter_class = help_format_cloudcredgrab
parser.add_argument('-u', '--username',
metavar="<USER>", default=None)
parser.add_argument('platform')
    options = vars(parser.parse_args(args))
    return options
|
nil0x42/phpsploit
|
plugins/credentials/cloudcredgrab/plugin_args.py
|
Python
|
gpl-3.0
| 596
|
from rambutan3.check_args.base.RAbstractTypeMatcher import RAbstractTypeMatcher
from rambutan3.check_args.set.RSetEnum import RSetEnum
from rambutan3.check_args.set.RSetOfMatcher import RSetOfMatcher
# noinspection PyPep8Naming
def BUILTIN_SET_OF(type_matcher: RAbstractTypeMatcher) -> RSetOfMatcher:
x = RSetOfMatcher(RSetEnum.BUILTIN_SET, type_matcher)
return x
|
kevinarpe/kevinarpe-rambutan3
|
rambutan3/check_args/annotation/BUILTIN_SET_OF.py
|
Python
|
gpl-3.0
| 373
|
# -*- coding: utf8 -*-
'''
Basic utility functions
Created on May 14, 2014
@author: Exp
'''
''' Get the current system time '''
def getSysTime(format = "%Y-%m-%d %H:%M:%S"):
import time
return time.strftime(format)
# End Fun getSysTime()
''' Check whether this is the local runtime environment; otherwise it is the SAE environment '''
def isLocalEnvironment():
from os import environ
return not environ.get("APP_NAME", "")
# End Fun isLocalEnvironment()
''' Encrypt a string (base64 encode) '''
def encrypt(plaintext):
import base64
return base64.encodestring(plaintext)
# End Fun encrypt()
''' Decrypt a string (base64 decode) '''
def decrypt(ciphertext):
import base64
return base64.decodestring(ciphertext)
# End Fun decrypt()
''' Simple transcoding: convert orgStr (of unknown encoding) to aimCharset; the source encoding of orgStr is detected automatically '''
def simpleTranscoding(orgStr, aimCharset):
import chardet
    orgCharset = chardet.detect(orgStr)['encoding'] # auto-detect the source encoding
return transcoding(orgStr, orgCharset, aimCharset)
# End Fun simpleTranscoding()
''' Transcoding: convert orgStr from orgCharset to aimCharset '''
def transcoding(orgStr, orgCharset, aimCharset):
unicodeStr = orgStr.decode(orgCharset)
return unicodeStr.encode(aimCharset)
# End Fun transcoding()
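# Usage sketch (Python 2 str semantics; assumes the chardet package is installed):
#   gbk_bytes = u'中文'.encode('gbk')
#   utf8_bytes = simpleTranscoding(gbk_bytes, 'utf-8') # source encoding detected automatically
#   plain = decrypt(encrypt('secret')) # round-trips through base64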
|
lyy289065406/expcodes
|
python/99-project/django-web/ExpPH/ExpPH/utils/BaseUtils.py
|
Python
|
gpl-3.0
| 1,328
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import re
import unicodedata
h1_start = re.compile(r"^\s*=(?P<title>[^=]+)=*[ \t]*")
valid_title = re.compile(r"[^=]+")
general_heading = re.compile(r"^\s*(={2,6}(?P<title>" + valid_title.pattern +
")=*)\s*$", flags=re.MULTILINE)
invalid_symbols = re.compile(r"[^\w\-_\s]+")
def strip_accents(s):
return ''.join(
(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(
c) != 'Mn'))
REPLACEMENTS = {
ord('ä'): 'ae',
ord('ö'): 'oe',
ord('ü'): 'ue',
ord('ß'): 'ss',
ord('Ä'): 'Ae',
ord('Ö'): 'Oe',
ord('Ü'): 'Ue',
ord('ẞ'): 'SS'
}
def substitute_umlauts(s):
return s.translate(REPLACEMENTS)
def remove_unallowed_chars(s):
s = invalid_symbols.sub('', s)
return s
def remove_and_compress_whitespaces(s):
return '_'.join(s.split()).strip('_')
def turn_into_valid_short_title(title, short_title_set=(), max_length=20):
st = substitute_umlauts(title)
st = strip_accents(st)
st = remove_unallowed_chars(st)
st = remove_and_compress_whitespaces(st)
st = st.lstrip('1234567890-_')
st = st[:min(len(st), max_length)]
if not st:
st = 'sub'
if st not in short_title_set:
return st
else:
i = 0
while True:
i += 1
suffix = str(i)
new_st = st[:min(max_length - len(suffix), len(st))] + suffix
if new_st not in short_title_set:
return new_st
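# For example (illustrative only):
#   turn_into_valid_short_title('Übergröße & Meer!')   ->  'Uebergroesse_Meer'
#   turn_into_valid_short_title('Intro', {'Intro'})    ->  'Intro1'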
def get_heading_matcher(level=0):
if 0 < level < 7:
s = "%d" % level
elif level == 0:
s = "1, 6"
else:
raise ValueError(
"level must be between 1 and 6 or 0, but was %d." % level)
pattern = r"^\s*={%s}(?P<title>[^=§]+)" \
r"(?:§\s*(?P<short_title>[^=§\s][^=§]*))?=*\s*$"
return re.compile(pattern % s, flags=re.MULTILINE)
|
Qwlouse/Findeco
|
node_storage/validation.py
|
Python
|
gpl-3.0
| 1,992
|
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo
else:
import urlparse # Usamos el nativo de PY2 que es más rápido
import re
import time
import traceback
import base64
from channelselector import get_thumb
from core import httptools
from core import servertools
from core import scrapertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from lib import generictools
from channels import filtertools
from channels import autoplay
# Channel shared with Cinetorrent, Magnetpelis, Pelispanda, Yestorrent
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = list(IDIOMAS.values())
list_quality = []
list_servers = ['torrent']
canonical = {
'channel': 'cinetorrent',
'host': config.get_setting("current_host", 'cinetorrent', default=''),
'host_alt': ['https://cinetorrent.co/'],
'host_black_list': [],
'CF': False, 'CF_test': False, 'alfa_s': True
}
host = canonical['host'] or canonical['host_alt'][0]
channel = canonical['channel']
categoria = channel.capitalize()
host_torrent = host[:-1]
patron_host = '((?:http.*\:)?\/\/(?:.*ww[^\.]*)?\.?(?:[^\.]+\.)?[\w|\-]+\.\w+)(?:\/|\?|$)'
patron_domain = '(?:http.*\:)?\/\/(?:.*ww[^\.]*)?\.?(?:[^\.]+\.)?([\w|\-]+\.\w+)(?:\/|\?|$)'
domain = scrapertools.find_single_match(host, patron_domain)
__modo_grafico__ = config.get_setting('modo_grafico', channel) # TMDB?
IDIOMAS_TMDB = {0: 'es', 1: 'en', 2: 'es,en'}
idioma_busqueda = IDIOMAS_TMDB[config.get_setting('modo_grafico_lang', channel)] # Base language for TMDB lookups
idioma_busqueda_VO = IDIOMAS_TMDB[2] # Language for original-version (VO) lookups
modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', channel) # Update only the latest active season?
timeout = config.get_setting('timeout_downloadpage', channel)
season_colapse = config.get_setting('season_colapse', channel) # Collapse seasons?
filter_languages = config.get_setting('filter_languages', channel) # Filter by language?
def mainlist(item):
logger.info()
itemlist = []
thumb_pelis = get_thumb("channels_movie.png")
thumb_series = get_thumb("channels_tvshow.png")
thumb_genero = get_thumb("genres.png")
thumb_anno = get_thumb("years.png")
thumb_calidad = get_thumb("top_rated.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, title="Películas", action="submenu",
url=host, thumbnail=thumb_pelis, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title=" - por Género", action="genero",
url=host, thumbnail=thumb_genero, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title=" - por Año", action="anno",
url=host, thumbnail=thumb_anno, extra="peliculas"))
if channel not in ['magnetpelis']:
itemlist.append(Item(channel=item.channel, title=" - por Calidad", action="calidad",
url=host, thumbnail=thumb_calidad, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title="Series", action="submenu",
url=host, thumbnail=thumb_series, extra="series"))
itemlist.append(Item(channel=item.channel, title=" - por Año", action="anno",
url=host, thumbnail=thumb_anno, extra="series"))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search",
url=host, thumbnail=thumb_buscar, extra="search"))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]",
folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal",
thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return
def submenu(item):
patron = '<li\s*class="header__nav-item">\s*<a\s*href="([^"]+)"\s*class="header__nav-link">([^<]+)<\/a>'
data, response, item, itemlist = generictools.downloadpage(item.url, timeout=timeout, canonical=canonical,
s2=False, patron=patron, item=item, itemlist=[]) # Descargamos la página
#Verificamos si se ha cargado una página, y si además tiene la estructura correcta
if not response.sucess or itemlist: # Si ERROR o lista de errores ..
return itemlist # ... Salimos
matches = re.compile(patron, re.DOTALL).findall(data)
#logger.debug(patron)
#logger.debug(matches)
#logger.debug(data)
if not matches:
logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " +
" / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.category +
': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. '
+ 'Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
for scrapedurl, scrapedtitle in matches:
if scrapertools.slugify(scrapedtitle) in item.extra:
item.url = urlparse.urljoin(host, scrapedurl.replace(scrapertools.find_single_match(scrapedurl, patron_host), ''))
if not item.url.endswith('/'): item.url += '/'
item.url += 'page/1'
return listado(item)
return itemlist
def anno(item):
logger.info()
from platformcode import platformtools
itemlist = []
patron = '(?i)<a\s*class="dropdown-toggle\s*header__nav-link"\s*href="#"\s*'
patron += 'role="button"\s*data-toggle="dropdown">[^<]*A.O\s*<\/a>\s*'
patron += '<ul\s*class="dropdown-menu\s*header__dropdown-menu">\s*(.*?)\s*<\/ul>\s*<\/li>'
data, response, item, itemlist = generictools.downloadpage(item.url, timeout=timeout, canonical=canonical,
s2=False, patron=patron, item=item, itemlist=[]) # Descargamos la página
#Verificamos si se ha cargado una página, y si además tiene la estructura correcta
if not response.sucess or itemlist: # Si ERROR o lista de errores ...
return itemlist # ... Salimos
data = scrapertools.find_single_match(data, patron)
patron = '<li><a\s*href="([^"]+)"\s*target="[^"]*">([^<]+)\s*<\/a>\s*<\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
#logger.debug(patron)
#logger.debug(matches)
#logger.debug(data)
if not matches:
logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " +
" / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.category +
': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. '
+ 'Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
year = platformtools.dialog_numeric(0, "Introduzca el Año de búsqueda", default="")
item.url = re.sub(r'years/\d+', 'years/%s' % year, matches[0][0])
if not item.url.endswith('/'): item.url += '/'
item.url += 'page/1'
item.extra2 = 'anno' + str(year)
return listado(item)
def genero(item):
logger.info()
itemlist = []
patron = '(?i)<a\s*class="dropdown-toggle\s*header__nav-link"\s*href="#"\s*'
patron += 'role="button"\s*data-toggle="dropdown">[^<]*G.nero\s*<\/a>\s*'
patron += '<ul\s*class="dropdown-menu\s*header__dropdown-menu">\s*(.*?)\s*<\/ul>\s*<\/li>'
data, response, item, itemlist = generictools.downloadpage(item.url, timeout=timeout, canonical=canonical,
s2=False, patron=patron, item=item, itemlist=[]) # Descargamos la página
#Verificamos si se ha cargado una página, y si además tiene la estructura correcta
if not response.sucess or itemlist: # Si ERROR o lista de errores ...
return itemlist # ... Salimos
data = scrapertools.find_single_match(data, patron)
patron = '<li><a\s*href="([^"]+)"\s*target="[^"]*">([^<]+)\s*<\/a>\s*<\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
#logger.debug(patron)
#logger.debug(matches)
#logger.debug(data)
if not matches:
logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " +
" / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.category +
': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. '
+ 'Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
for scrapedurl, gen in matches:
itemlist.append(item.clone(action="listado", title=gen.capitalize(), url=scrapedurl + 'page/1',
extra2='genero'))
return itemlist
def calidad(item):
logger.info()
itemlist = []
patron = '(?i)<a\s*class="dropdown-toggle\s*header__nav-link"\s*href="#"\s*'
patron += 'role="button"\s*data-toggle="dropdown">[^<]*calidad\s*<\/a>\s*'
patron += '<ul\s*class="dropdown-menu\s*header__dropdown-menu">\s*(.*?)\s*<\/ul>\s*<\/li>'
data, response, item, itemlist = generictools.downloadpage(item.url, timeout=timeout, canonical=canonical,
s2=False, patron=patron, item=item, itemlist=[]) # Descargamos la página
#Verificamos si se ha cargado una página, y si además tiene la estructura correcta
if not response.sucess or itemlist: # Si ERROR o lista de errores ...
return itemlist # ... Salimos
data = scrapertools.find_single_match(data, patron)
patron = '<li><a\s*href="([^"]+)"\s*target="[^"]*">([^<]+)\s*<\/a>\s*<\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
#logger.debug(patron)
#logger.debug(matches)
#logger.debug(data)
if not matches:
logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " +
" / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.category +
': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. '
+ 'Reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
for scrapedurl, cal in matches:
if cal not in ['HD', '720p']:
itemlist.append(item.clone(action="listado", title=cal.capitalize(), url=scrapedurl + 'page/1',
extra2='calidad'))
return itemlist
def listado(item): # Listado principal y de búsquedas
logger.info()
itemlist = []
item.category = categoria
thumb_pelis = get_thumb("channels_movie.png")
thumb_series = get_thumb("channels_tvshow.png")
#logger.debug(item)
curr_page = 1 # Página inicial
last_page = 99999 # Última página inicial
last_page_print = 1 # Última página inicial, para píe de página
page_factor = 1.0 # Factor de conversión de pag. web a pag. Alfa
if item.curr_page:
curr_page = int(item.curr_page) # Si viene de una pasada anterior, lo usamos
del item.curr_page # ... y lo borramos
if item.last_page:
last_page = int(item.last_page) # Si viene de una pasada anterior, lo usamos
del item.last_page # ... y lo borramos
if item.page_factor:
page_factor = float(item.page_factor) # Si viene de una pasada anterior, lo usamos
del item.page_factor # ... y lo borramos
if item.last_page_print:
last_page_print = item.last_page_print # Si viene de una pasada anterior, lo usamos
del item.last_page_print # ... y lo borramos
cnt_tot = 30 # Poner el num. máximo de items por página
cnt_title = 0 # Contador de líneas insertadas en Itemlist
if item.cnt_tot_match:
cnt_tot_match = float(item.cnt_tot_match) # restauramos el contador TOTAL de líneas procesadas de matches
del item.cnt_tot_match
else:
cnt_tot_match = 0.0 # Contador TOTAL de líneas procesadas de matches
inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable
fin = inicio + 5 # Después de este tiempo pintamos (segundos)
timeout_search = timeout * 2 # Timeout para descargas
if item.extra == 'search' and item.extra2 == 'episodios': # Si viene de episodio que quitan los límites
cnt_tot = 999
fin = inicio + 30
#Sistema de paginado para evitar páginas vacías o semi-vacías en casos de búsquedas con series con muchos episodios
title_lista = [] # Guarda la lista de series que ya están en Itemlist, para no duplicar lineas
if item.title_lista: # Si viene de una pasada anterior, la lista ya estará guardada
title_lista.extend(item.title_lista) # Se usa la lista de páginas anteriores en Item
del item.title_lista # ... limpiamos
matches = []
if not item.extra2: # Si viene de Catálogo o de Alfabeto
item.extra2 = ''
post = None
if item.post: # Rescatamos el Post, si lo hay
post = item.post
next_page_url = item.url
# Máximo num. de líneas permitidas por TMDB. Máx de 5 segundos por Itemlist para no degradar el rendimiento
while (cnt_title < cnt_tot and curr_page <= last_page and fin > time.time()) or item.matches:
# Descarga la página
data = ''
cnt_match = 0 # Contador de líneas procesadas de matches
if not item.matches: # si no viene de una pasada anterior, descargamos
data, response, item, itemlist = generictools.downloadpage(next_page_url, canonical=canonical,
timeout=timeout_search, post=post, s2=False,
item=item, itemlist=itemlist) # Descargamos la página)
# Verificamos si ha cambiado el Host
if response.host:
next_page_url = response.url_new
# Verificamos si se ha cargado una página correcta
curr_page += 1 # Apunto ya a la página siguiente
if not data or not response.sucess: # Si la web está caída salimos sin dar error
if len(itemlist) > 1: # Si hay algo que pintar lo pintamos
last_page = 0
break
return itemlist # Si no hay nada más, salimos directamente
#Patrón para búsquedas, pelis y series
patron = '<div\s*class="[^"]+">\s*<div\s*class="card">\s*<a\s*href="([^"]+)"'
patron += '\s*class="card__cover">\s*<img[^>]+src="([^"]*)"\s*alt="[^"]*"\s*\/*>\s*'
patron += '<div\s*class="card__play">.*?<\/div>\s*<ul\s*class="card__list">\s*'
patron += '<li>([^<]+)<\/li>\s*<\/ul>\s*(?:<ul\s*class="card__list\s*right">\s*'
patron += '<li>(\w*)<\/li>[^"]*<\/ul>\s*)?<\/a>\s*<div\s*class="card__content">\s*'
patron += '<h3\s*class="card__title"><a\s*href="[^"]+">([^<]+)<\/a><\/h3>'
patron += '.*?<\/div>\s*<\/div>\s*<\/div>'
if not item.matches: # De pasada anterior?
matches = re.compile(patron, re.DOTALL).findall(data)
else:
matches = item.matches
del item.matches
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
if not matches and item.extra != 'search' and not item.extra2: #error
logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web "
+ " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() +
': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. '
+ 'Reportar el error con el log'))
break #si no hay más datos, algo no funciona, pintamos lo que tenemos
if not matches and item.extra == 'search': #búsqueda vacía
if len(itemlist) > 0: # Si hay algo que pintar lo pintamos
last_page = 0
break
return itemlist #Salimos
# Buscamos la próxima página
next_page_url = re.sub(r'page\/(\d+)', 'page/%s' % str(curr_page), item.url)
#logger.debug('curr_page: ' + str(curr_page) + ' / last_page: ' + str(last_page))
# Buscamos la última página
if last_page == 99999: #Si es el valor inicial, buscamos
patron_last = '<ul\s*class="pagination[^"]+">.*?<li>\s*<a\s*class="page-numbers"\s*href="[^"]+">'
patron_last += '(\d+)<\/a><\/li>\s*<li>\s*<a\s*class="next page-numbers"\s*href="[^"]+">»<\/a><\/li>\s*<\/ul>'
try:
last_page = int(scrapertools.find_single_match(data, patron_last))
page_factor = float(len(matches)) / float(cnt_tot)
except: #Si no lo encuentra, lo ponemos a 999
last_page = 1
last_page_print = int((float(len(matches)) / float(cnt_tot)) + 0.999999)
#logger.debug('curr_page: ' + str(curr_page) + ' / last_page: ' + str(last_page))
#Empezamos el procesado de matches
for scrapedurl, scrapedthumb, scrapedquality, scrapedlanguage, scrapedtitle in matches:
cnt_match += 1
title = scrapedtitle
title = scrapertools.remove_htmltags(title).rstrip('.') # Removemos Tags del título
url = scrapedurl
title_subs = [] #creamos una lista para guardar info importante
# Slugify, pero más light
title = title.replace("á", "a").replace("é", "e").replace("í", "i")\
.replace("ó", "o").replace("ú", "u").replace("ü", "u")\
.replace("�", "ñ").replace("ñ", "ñ")
title = scrapertools.decode_utf8_error(title)
# Se filtran las entradas para evitar duplicados de Temporadas
url_list = url
if url_list in title_lista: #Si ya hemos procesado el título, lo ignoramos
continue
else:
title_lista += [url_list] #la añadimos a la lista de títulos
# Si es una búsqueda por años, filtramos por tipo de contenido
if item.extra == 'series' and '/serie' not in url:
continue
elif item.extra == 'peliculas' and '/serie' in url:
continue
cnt_title += 1 # Incrementamos el contador de entradas válidas
item_local = item.clone() #Creamos copia de Item para trabajar
if item_local.tipo: #... y limpiamos
del item_local.tipo
if item_local.totalItems:
del item_local.totalItems
if item_local.intervencion:
del item_local.intervencion
if item_local.viewmode:
del item_local.viewmode
item_local.extra2 = True
del item_local.extra2
item_local.text_bold = True
del item_local.text_bold
item_local.text_color = True
del item_local.text_color
# Después de un Search se restablecen las categorías
if item_local.extra == 'search':
if '/serie' in url:
item_local.extra = 'series' # Serie búsqueda
else:
item_local.extra = 'peliculas' # Película búsqueda
# Procesamos idiomas
item_local.language = [] #creamos lista para los idiomas
if '[Subs. integrados]' in scrapedquality or '(Sub Forzados)' in scrapedquality \
or 'Sub' in scrapedquality or 'ing' in scrapedlanguage.lower():
item_local.language = ['VOS'] # añadimos VOS
if 'lat' in scrapedlanguage.lower():
item_local.language += ['LAT'] # añadimos LAT
if 'castellano' in scrapedquality.lower() or ('español' in scrapedquality.lower() \
and not 'latino' in scrapedquality.lower()) or 'cas' in scrapedlanguage.lower():
item_local.language += ['CAST'] # añadimos CAST
if '[Dual' in title or 'dual' in scrapedquality.lower() or 'dual' in scrapedlanguage.lower():
title = re.sub(r'(?i)\[dual.*?\]', '', title)
item_local.language += ['DUAL'] # añadimos DUAL
if not item_local.language:
item_local.language = ['LAT'] # [LAT] por defecto
# Procesamos Calidad
if scrapedquality:
item_local.quality = scrapertools.remove_htmltags(scrapedquality) # iniciamos calidad
if '[720p]' in scrapedquality.lower() or '720p' in scrapedquality.lower():
item_local.quality = '720p'
if '[1080p]' in scrapedquality.lower() or '1080p' in scrapedquality.lower():
item_local.quality = '1080p'
if '4k' in scrapedquality.lower():
item_local.quality = '4K'
if '3d' in scrapedquality.lower() and not '3d' in item_local.quality.lower():
item_local.quality += ', 3D'
if not item_local.quality or item_local.extra == 'series':
item_local.quality = '720p'
item_local.thumbnail = '' #iniciamos thumbnail
item_local.url = urlparse.urljoin(host, url) #guardamos la url final
item_local.context = "['buscar_trailer']" #... y el contexto
# Guardamos los formatos para series
if item_local.extra == 'series' or '/serie' in item_local.url:
item_local.contentType = "tvshow"
item_local.action = "episodios"
item_local.season_colapse = season_colapse #Muestra las series agrupadas por temporadas?
else:
# Guardamos los formatos para películas
item_local.contentType = "movie"
item_local.action = "findvideos"
#Limpiamos el título de la basura innecesaria
if item_local.contentType == "tvshow":
title = scrapertools.find_single_match(title, '(^.*?)\s*(?:$|\(|\[|-)')
title = re.sub(r'(?i)TV|Online|(4k-hdr)|(fullbluray)|4k| - 4k|(3d)|miniserie', '', title).strip()
item_local.quality = re.sub(r'(?i)proper|unrated|directors|cut|repack|internal|real|extended|masted|docu|super|duper|amzn|uncensored|hulu',
'', item_local.quality).strip()
#Analizamos el año. Si no está claro ponemos '-'
item_local.infoLabels["year"] = '-'
try:
if 'anno' in item.extra2:
item_local.infoLabels["year"] = int(item.extra2.replace('anno', ''))
except:
pass
#Terminamos de limpiar el título
title = re.sub(r'[\(|\[]\s+[\)|\]]', '', title)
title = title.replace('()', '').replace('[]', '').replace('[4K]', '').replace('(4K)', '').strip().lower().title()
item_local.from_title = title.strip().lower().title() #Guardamos esta etiqueta para posible desambiguación de título
#Salvamos el título según el tipo de contenido
if item_local.contentType == "movie":
item_local.contentTitle = title
else:
item_local.contentSerieName = title.strip().lower().title()
item_local.title = title.strip().lower().title()
#Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB
item_local.title_subs = title_subs
#Salvamos y borramos el número de temporadas porque TMDB a veces hace tonterias. Lo pasamos como serie completa
if item_local.contentSeason and (item_local.contentType == "season" \
or item_local.contentType == "tvshow"):
item_local.contentSeason_save = item_local.contentSeason
del item_local.infoLabels['season']
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if filter_languages > 0: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
cnt_title = len(itemlist) # Recalculamos los items después del filtrado
if cnt_title >= cnt_tot and (len(matches) - cnt_match) + cnt_title > cnt_tot * 1.3: #Contador de líneas añadidas
break
#logger.debug(item_local)
matches = matches[cnt_match:] # Salvamos la entradas no procesadas
cnt_tot_match += cnt_match # Calcular el num. total de items mostrados
#Pasamos a TMDB la lista completa Itemlist
tmdb.set_infoLabels(itemlist, __modo_grafico__, idioma_busqueda=idioma_busqueda)
#Llamamos al método para el maquillaje de los títulos obtenidos desde TMDB
item, itemlist = generictools.post_tmdb_listado(item, itemlist)
# Si es necesario añadir paginacion
if curr_page <= last_page or len(matches) > 0:
curr_page_print = int(cnt_tot_match / float(cnt_tot))
if curr_page_print < 1:
curr_page_print = 1
if last_page:
if last_page > 1:
last_page_print = int((last_page * page_factor) + 0.999999)
title = '%s de %s' % (curr_page_print, last_page_print)
else:
title = '%s' % curr_page_print
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente "
+ title, title_lista=title_lista, url=next_page_url, extra=item.extra,
extra2=item.extra2, last_page=str(last_page), curr_page=str(curr_page),
page_factor=str(page_factor), cnt_tot_match=str(cnt_tot_match), matches=matches,
last_page_print=last_page_print, post=post))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
matches = []
data = ''
response = {
'data': data,
'sucess': False,
'code': 0
}
response = type('HTTPResponse', (), response)
#logger.debug(item)
#Bajamos los datos de la página y seleccionamos el bloque
patron = '\s*<th\s*class="hide-on-mobile">Total\s*Descargas<\/th>\s*<th>'
patron += 'Descargar<\/th>\s*<\/thead>\s*<tbody>\s*(.*?<\/tr>)\s*<\/tbody>'
patron += '\s*<\/table>\s*<\/div>'
if not item.matches:
data, response, item, itemlist = generictools.downloadpage(item.url, timeout=timeout, canonical=canonical,
s2=False, patron=patron, item=item, itemlist=[]) # Descargamos la página)
#Verificamos si se ha cargado una página, y si además tiene la estructura correcta
if (not data and not item.matches) or response.code == 999:
if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia?
if len(item.emergency_urls) > 1:
matches = item.emergency_urls[1] #Restauramos matches de vídeos
elif len(item.emergency_urls) == 1 and item.emergency_urls[0]:
matches = item.emergency_urls[0] #Restauramos matches de vídeos - OLD FORMAT
item.armagedon = True #Marcamos la situación como catastrófica
else:
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
elif data:
# Seleccionamos el bloque y buscamos los apartados
data = scrapertools.find_single_match(data, patron)
patron = '<tr>(?:\s*<td>(\d+)<\/td>)?(?:\s*<td>([^<]*)<\/td>)?\s*<td>([^<]+)<\/td>'
patron += '\s*<td>([^<]*)<\/td>\s*<td\s*class=[^<]+<\/td>(?:\s*<td>([^<]+)<\/td>)?'
patron += '(?:\s*<td\s*class=[^<]+<\/td>)?\s*<td\s*class=[^<]+<\/td>\s*<td>\s*'
patron += '<a\s*class="[^>]+href="([^"]+)"'
if not item.armagedon:
if not item.matches:
matches = re.compile(patron, re.DOTALL).findall(data)
else:
matches = item.matches
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
if not matches: #error
return itemlist
#Si es un lookup para cargar las urls de emergencia en la Videoteca...
if item.videolibray_emergency_urls:
item.emergency_urls = [] #Iniciamos emergency_urls
item.emergency_urls.append([]) #Reservamos el espacio para los .torrents locales
matches_list = [] # Convertimos matches-tuple a matches-list
for tupla in matches:
if isinstance(tupla, tuple):
matches_list.append(list(tupla))
if matches_list:
item.emergency_urls.append(matches_list) # Salvamnos matches de los vídeos...
else:
item.emergency_urls.append(matches)
#Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB
if not item.videolibray_emergency_urls:
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
#Ahora tratamos los enlaces .torrent con las diferentes calidades
for x, (episode_num, scrapedserver, scrapedquality, scrapedlanguage, scrapedsize, scrapedurl) in enumerate(matches):
scrapedpassword = ''
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
item_local.url = generictools.convert_url_base64(scrapedurl, host_torrent)
if item.videolibray_emergency_urls and item_local.url != scrapedurl:
item.emergency_urls[1][x][4] = item_local.url
# Restauramos urls de emergencia si es necesario
local_torr = ''
if item.emergency_urls and not item.videolibray_emergency_urls:
try: # Guardamos la url ALTERNATIVA
if item.emergency_urls[0][0].startswith('http') or item.emergency_urls[0][0].startswith('//'):
item_local.torrent_alt = generictools.convert_url_base64(item.emergency_urls[0][0], host_torrent)
else:
item_local.torrent_alt = generictools.convert_url_base64(item.emergency_urls[0][0])
except:
item_local.torrent_alt = ''
item.emergency_urls[0] = []
from core import filetools
if item.contentType == 'movie':
FOLDER = config.get_setting("folder_movies")
else:
FOLDER = config.get_setting("folder_tvshows")
if item.armagedon and item_local.torrent_alt:
item_local.url = item_local.torrent_alt # Restauramos la url
if not item.torrent_alt.startswith('http'):
local_torr = filetools.join(config.get_videolibrary_path(), FOLDER, item_local.url)
if len(item.emergency_urls[0]) > 1:
del item.emergency_urls[0][0]
# Procesamos idiomas
item_local.language = [] #creamos lista para los idiomas
item_local.quality = scrapedquality # Copiamos la calidad
if '[Subs. integrados]' in scrapedlanguage or '(Sub Forzados)' in scrapedlanguage \
or 'Subs integrados' in scrapedlanguage:
item_local.language = ['VOS'] # añadimos VOS
if 'castellano' in scrapedlanguage.lower() or ('español' in scrapedlanguage.lower() and not 'latino' in scrapedlanguage.lower()):
item_local.language += ['CAST'] # añadimos CAST
if 'dual' in item_local.quality.lower():
item_local.quality = re.sub(r'(?i)dual.*?', '', item_local.quality).strip()
item_local.language += ['DUAL'] # añadimos DUAL
if not item_local.language:
item_local.language = ['LAT'] # [LAT] por defecto
#Buscamos tamaño en el archivo .torrent
size = ''
if item_local.torrent_info:
size = item_local.torrent_info
elif scrapedsize:
size = scrapedsize
if not size and not item.videolibray_emergency_urls and not item_local.url.startswith('magnet:'):
if not item.armagedon:
size = generictools.get_torrent_size(item_local.url, local_torr=local_torr) #Buscamos el tamaño en el .torrent
if 'ERROR' in size and item.emergency_urls and not item.videolibray_emergency_urls:
item_local.armagedon = True
try: # Restauramos la url
if item.emergency_urls[0][0].startswith('http') or item.emergency_urls[0][0].startswith('//'):
item_local.url = generictools.convert_url_base64(item.emergency_urls[0][0], host_torrent)
else:
item_local.url = generictools.convert_url_base64(item.emergency_urls[0][0])
if not item.url.startswith('http'):
local_torr = filetools.join(config.get_videolibrary_path(), FOLDER, item_local.url)
except:
item_local.torrent_alt = ''
item.emergency_urls[0] = []
size = generictools.get_torrent_size(item_local.url, local_torr=local_torr)
if size:
size = size.replace('GB', 'G·B').replace('Gb', 'G·b').replace('MB', 'M·B')\
.replace('Mb', 'M·b').replace('.', ',')
item_local.torrent_info = '%s, ' % size #Agregamos size
if item_local.url.startswith('magnet:') and not 'Magnet' in item_local.torrent_info:
item_local.torrent_info += ' Magnet'
if item_local.torrent_info:
item_local.torrent_info = item_local.torrent_info.strip().strip(',')
if item.videolibray_emergency_urls:
item.torrent_info = item_local.torrent_info
if not item.unify:
item_local.torrent_info = '[%s]' % item_local.torrent_info
# Guadamos la password del RAR
password = scrapedpassword
# Si tiene contraseña, la guardamos y la pintamos
if password or item.password:
if not item.password: item.password = password
item_local.password = item.password
itemlist.append(item.clone(action="", title="[COLOR magenta][B] Contraseña: [/B][/COLOR]'"
+ item_local.password + "'", folder=False))
# Save emergency URLs when coming from a video-library creation lookup
if item.videolibray_emergency_urls:
item.emergency_urls[0].append(item_local.url) #guardamos la url y nos vamos
continue
if item_local.armagedon:
item_local.quality = '[COLOR hotpink][E][/COLOR] [COLOR limegreen]%s[/COLOR]' % item_local.quality
#Now render the torrent link
item_local.title = '[[COLOR yellow]?[/COLOR]] [COLOR yellow][Torrent][/COLOR] ' \
+ '[COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR] %s' % \
(item_local.quality, str(item_local.language), \
item_local.torrent_info)
#Prepare title and quality, removing empty tags
item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
item_local.title = item_local.title.replace("--", "").replace("[]", "")\
.replace("()", "").replace("(/)", "").replace("[/]", "")\
.replace("|", "").strip()
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality)
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
item_local.quality = item_local.quality.replace("--", "").replace("[]", "")\
.replace("()", "").replace("(/)", "").replace("[/]", "")\
.replace("|", "").strip()
item_local.server = scrapedserver.lower() #Servidor
if item_local.url.startswith('magnet:') or not item_local.server:
item_local.server = 'torrent'
if item_local.server != 'torrent':
if config.get_setting("hidepremium"): #Si no se aceptan servidore premium, se ignoran
if not servertools.is_server_enabled(item_local.server):
continue
devuelve = servertools.findvideosbyserver(item_local.url, item_local.server) #existe el link ?
if not devuelve:
continue
item_local.url = devuelve[0][1]
item_local.alive = servertools.check_video_link(item_local.url, item_local.server, timeout=timeout) #activo el link ?
if 'NO' in item_local.alive:
continue
if not item_local.torrent_info or 'Magnet' in item_local.torrent_info:
item_local.alive = "??" #Calidad del link sin verificar
elif 'ERROR' in item_local.torrent_info and 'Pincha' in item_local.torrent_info:
item_local.alive = "ok" #link en error, CF challenge, Chrome disponible
elif 'ERROR' in item_local.torrent_info and 'Introduce' in item_local.torrent_info:
item_local.alive = "??" #link en error, CF challenge, ruta de descarga no disponible
item_local.channel = 'setting'
item_local.action = 'setting_torrent'
item_local.unify = False
item_local.folder = False
item_local.item_org = item.tourl()
elif 'ERROR' in item_local.torrent_info:
item_local.alive = "no" #Calidad del link en error, CF challenge?
else:
item_local.alive = "ok" #Calidad del link verificada
if item_local.channel != 'setting':
item_local.action = "play" #Visualizar vídeo
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Required for FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
#logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
#If this is a lookup to load the emergency URLs into the video library...
if item.videolibray_emergency_urls:
return item #... nos vamos
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host,
title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]",
thumbnail=thumb_separador, folder=False))
if len(itemlist_t) == 0:
if len(itemlist) == 0 or (len(itemlist) > 0 and itemlist[-1].server != 'torrent'):
return []
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
# Required for AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist
def episodios(item):
logger.info()
itemlist = []
item.category = categoria
#logger.debug(item)
if item.from_title:
item.title = item.from_title
#Clear season and episode numbers possibly left over from the New Releases listing
season_display = 0
if item.contentSeason:
if item.season_colapse: #Si viene del menú de Temporadas...
season_display = item.contentSeason #... salvamos el num de sesión a pintar
item.from_num_season_colapse = season_display
del item.season_colapse
item.contentType = "tvshow"
if item.from_title_season_colapse:
item.title = item.from_title_season_colapse
del item.from_title_season_colapse
if item.infoLabels['title']:
del item.infoLabels['title']
del item.infoLabels['season']
if item.contentEpisodeNumber:
del item.infoLabels['episode']
if season_display == 0 and item.from_num_season_colapse:
season_display = item.from_num_season_colapse
# Get up-to-date information for the series. TMDB is essential for the video library
idioma = idioma_busqueda
if 'VO' in str(item.language):
idioma = idioma_busqueda_VO
try:
tmdb.set_infoLabels(item, True, idioma_busqueda=idioma)
except:
pass
modo_ultima_temp_alt = modo_ultima_temp
if item.ow_force == "1": #Si hay una migración de canal o url, se actualiza todo
modo_ultima_temp_alt = False
# Check the latest season from TMDB and from the .nfo
max_temp = 1
if item.infoLabels['number_of_seasons']:
max_temp = item.infoLabels['number_of_seasons']
y = []
if modo_ultima_temp_alt and item.library_playcounts: #Averiguar cuantas temporadas hay en Videoteca
patron = 'season (\d+)'
matches = re.compile(patron, re.DOTALL).findall(str(item.library_playcounts))
for x in matches:
y += [int(x)]
max_temp = max(y)
# If the series has only one season, or only one season is listed, save the url and continue normally
list_temp = []
list_temp.append(item.url)
#logger.debug(list_temp)
# Download the pages
for _url in list_temp: # Recorre todas las temporadas encontradas
url = _url
data, response, item, itemlist = generictools.downloadpage(url, timeout=timeout, canonical=canonical,
s2=False, item=item, itemlist=itemlist) # Descargamos la página
# Check whether the host has changed
if response.host:
for x, u in enumerate(list_temp):
list_temp[x] = list_temp[x].replace(scrapertools.find_single_match(url, patron_host), response.host.rstrip('/'))
url = response.url_new
#Check that a page was loaded and that it has the expected structure
if not response.sucess: # Si ERROR o lista de errores ...
return itemlist # ... Salimos
patron_temp = '<div\s*class="card-header">\s*<button\s*type="button"\s*data-toggle='
patron_temp += '"collapse"\s*data-target="#collapse-\d+">\s*<span>Temporada\s*(\d+)'
patron_temp += '<\/span>(.*?)<\/table>\s*<\/div>\s*<\/div>\s*<\/div>'
matches_temp = re.compile(patron_temp, re.DOTALL).findall(data)
#logger.debug("PATRON_temp: " + patron_temp)
#logger.debug(matches_temp)
#logger.debug(data)
patron = '<tr>(?:\s*<td>(\d+)<\/td>)?(?:\s*<td>([^<]*)<\/td>)?\s*<td>([^<]+)<\/td>'
patron += '\s*<td>([^<]*)<\/td>\s*<td\s*class=[^<]+<\/td>(?:\s*<td>([^<]+)<\/td>)?'
patron += '(?:\s*<td\s*class=[^<]+<\/td>)?\s*<td\s*class=[^<]+<\/td>\s*<td>\s*'
patron += '<a\s*class="[^>]+href="([^"]+)"'
for season_num, episodes in matches_temp:
matches = re.compile(patron, re.DOTALL).findall(episodes)
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(episodes)
# Iterate over all episodes, generating a local Item for each one in itemlist
for episode_num, scrapedserver, scrapedquality, scrapedlanguage, scrapedsize, scrapedurl in matches:
server = scrapedserver
if not server: server = 'torrent'
item_local = item.clone()
item_local.action = "findvideos"
item_local.contentType = "episode"
if item_local.library_playcounts:
del item_local.library_playcounts
if item_local.library_urls:
del item_local.library_urls
if item_local.path:
del item_local.path
if item_local.update_last:
del item_local.update_last
if item_local.update_next:
del item_local.update_next
if item_local.channel_host:
del item_local.channel_host
if item_local.active:
del item_local.active
if item_local.contentTitle:
del item_local.infoLabels['title']
if item_local.season_colapse:
del item_local.season_colapse
item_local.url = url # Usamos las url de la temporada, no hay de episodio
url_base64 = generictools.convert_url_base64(scrapedurl, host_torrent)
item_local.matches = []
item_local.matches.append((episode_num, server, scrapedquality, scrapedlanguage, scrapedsize, url_base64)) # Salvado Matches de cada episodio
item_local.context = "['buscar_trailer']"
if not item_local.infoLabels['poster_path']:
item_local.thumbnail = item_local.infoLabels['thumbnail']
# Extract season and episode numbers
try:
item_local.contentSeason = int(season_num)
except:
item_local.contentSeason = 1
try:
item_local.contentEpisodeNumber = int(episode_num)
except:
item_local.contentEpisodeNumber = 0
item_local.title = '%sx%s - ' % (str(item_local.contentSeason),
str(item_local.contentEpisodeNumber).zfill(2))
# Process quality
if scrapedquality:
item_local.quality = scrapertools.remove_htmltags(scrapedquality) # iniciamos calidad
if '[720p]' in scrapedquality.lower() or '720p' in scrapedquality.lower():
item_local.quality = '720p'
if '[1080p]' in scrapedquality.lower() or '1080p' in scrapedquality.lower():
item_local.quality = '1080p'
if '4k' in scrapedquality.lower():
item_local.quality = '4K'
if '3d' in scrapedquality.lower() and not '3d' in item_local.quality.lower():
item_local.quality += ', 3D'
if not item_local.quality:
item_local.quality = '720p'
# Check whether there is more than one link per episode; if so, group them
if len(itemlist) > 0 and item_local.contentSeason == itemlist[-1].contentSeason \
and item_local.contentEpisodeNumber == itemlist[-1].contentEpisodeNumber \
and itemlist[-1].contentEpisodeNumber != 0: # solo guardamos un episodio ...
if itemlist[-1].quality:
if item_local.quality not in itemlist[-1].quality:
itemlist[-1].quality += ", " + item_local.quality # ... pero acumulamos las calidades
else:
itemlist[-1].quality = item_local.quality
itemlist[-1].matches.append(item_local.matches[0]) # Salvado Matches en el episodio anterior
continue # ignoramos el episodio duplicado
if season_display > 0: # Son de la temporada estos episodios?
if item_local.contentSeason > season_display:
break
elif item_local.contentSeason < season_display:
continue
if modo_ultima_temp_alt and item.library_playcounts: # Si solo se actualiza la última temporada de Videoteca
if item_local.contentSeason < max_temp:
continue # Sale del bucle actual del FOR
itemlist.append(item_local.clone())
#logger.debug(item_local)
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) #clasificamos
if item.season_colapse and not item.add_videolibrary: #Si viene de listado, mostramos solo Temporadas
item, itemlist = generictools.post_tmdb_seasons(item, itemlist)
if not item.season_colapse: #Si no es pantalla de Temporadas, pintamos todo
# Run through TMDB and sort the list by season and episode
tmdb.set_infoLabels(itemlist, True, idioma_busqueda=idioma)
#Call the method that polishes the titles obtained from TMDB
item, itemlist = generictools.post_tmdb_episodios(item, itemlist)
#logger.debug(item)
return itemlist
def actualizar_titulos(item):
logger.info()
#Call the method that updates the title with tmdb.find_and_set_infoLabels
item = generictools.update_title(item)
#Return to the next action in the channel
return item
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
item.url = host + 'buscar/page/1/?buscar=' + texto
item.extra = 'search'
if texto:
return listado(item)
else:
return []
except:
for line in sys.exc_info():
logger.error("{0}".format(line))
logger.error(traceback.format_exc(1))
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
item.title = "newest"
item.category_new = "newest"
item.channel = channel
try:
if categoria in ['peliculas', 'latino', 'torrent']:
item.url = host + "peliculas/page/1/"
if channel == 'yestorrent':
item.url = host + "Descargar-peliculas-completas/page/1/"
item.extra = "peliculas"
item.extra2 = "novedades"
item.action = "listado"
itemlist.extend(listado(item))
if len(itemlist) > 0 and (">> Página siguiente" in itemlist[-1].title or "Pagina siguiente >>" in itemlist[-1].title):
itemlist.pop()
if categoria in ['series', 'latino', 'torrent']:
item.category_new= 'newest'
item.url = host + "series/page/1/"
item.extra = "series"
item.extra2 = "novedades"
item.action = "listado"
itemlist.extend(listado(item))
if len(itemlist) > 0 and (">> Página siguiente" in itemlist[-1].title or "Pagina siguiente >>" in itemlist[-1].title):
itemlist.pop()
# Catch the exception so that the New Releases channel is not interrupted if one channel fails
except:
for line in sys.exc_info():
logger.error("{0}".format(line))
logger.error(traceback.format_exc(1))
return []
return itemlist
|
alfa-addon/addon
|
plugin.video.alfa/channels/cinetorrent.py
|
Python
|
gpl-3.0
| 59,082
|
"""
Django settings for eproweb project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import django.conf.global_settings as DEFAULT_SETTINGS
from django.contrib.messages import constants as message
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'REPLACED_IN_LOCAL_SETTINGS_FILE'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["localhost", ".mercycorps.org"]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'rest_framework',
'rest_framework_swagger',
'crispy_forms',
'djangocosign',
'epro',
'feedback',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'eproweb.middleware.AjaxMessaging',
'eproweb.middleware.TimingMiddleware',
)
ROOT_URLCONF = 'eproweb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + "/templates/",],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'eproweb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Where to get redirected after logging in.
LOGIN_REDIRECT_URL = '/'
#https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = [os.path.join(BASE_DIR, 'fixtures'),]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# STATIC_ROOT = BASE_DIR + "/static/"
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
CRISPY_TEMPLATE_PACK='bootstrap3'
### This is to map Django message levels to Bootstrap3 alert levels ########
MESSAGE_TAGS = {message.DEBUG: 'debug',
message.INFO: 'info',
message.SUCCESS: 'success',
message.WARNING: 'warning',
message.ERROR: 'danger',}
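# For example (illustrative), a template that builds its CSS class from message.tags
# will render messages.error(request, "...") as <div class="alert alert-danger">...</div>.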
# Using the Cosgin Authentication backend first.
AUTHENTICATION_BACKENDS = (
'djangocosign.cosign.CosignBackend',
'django.contrib.auth.backends.ModelBackend',
)
# Email setup
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
# formatting options reference: https://docs.python.org/2/library/logging.html#formatter-objects
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(name)s %(funcName)s %(lineno)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
'file': {
'class': 'logging.FileHandler',
'filename': "/var/log/httpd/eproweb_app.log",
'formatter': 'verbose',
},
'mail_admins': {
'class': 'django.utils.log.AdminEmailHandler',
},
},
'loggers': {
'django': {
'handlers': ['console', 'file'],
'level': 'INFO',
'propagate': True,
},
'epro': {
'handlers': ['file'],
'level': 'WARNING',
'propagate': True,
},
'feedback': {
'handlers': ['file'],
'level': 'WARNING',
'propagate': True,
},
'app_admins': {
'handlers': ['file', 'mail_admins',],
'level': 'ERROR',
'propagate': True,
'include_html': True,
},
},
}
|
mahmoodkhan/ranewal
|
htdocs/eproweb/settings/base.py
|
Python
|
gpl-3.0
| 5,611
|
#!/usr/bin/env python3
'''Test for DDNS forwarding'''
from dnstest.test import Test
t = Test()
master = t.server("knot")
slave = t.server("knot")
zone = t.zone("example.com.")
t.link(zone, master, slave, ddns=True)
t.start()
master.zones_wait(zone)
seri = slave.zones_wait(zone)
# OK
update = slave.update(zone)
update.add("forwarded.example.com.", 1, "TXT", "forwarded")
update.send("NOERROR")
resp = master.dig("forwarded.example.com.", "TXT")
resp.check("forwarded")
slave.zones_wait(zone, seri)
t.xfr_diff(master, slave, zone)
# NAME out of zone
update = slave.update(zone)
update.add("forwarded.", 1, "TXT", "forwarded")
update.send("NOTZONE")
resp = master.dig("forwarded.", "TXT")
resp.check(rcode="REFUSED")
t.sleep(3)
t.xfr_diff(master, slave, zone)
t.end()
|
CZ-NIC/knot
|
tests-extra/tests/ddns/forward/test.py
|
Python
|
gpl-3.0
| 777
|
"""
WSGI config for fitch project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fitch.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
winstonschroeder77/fitch-
|
fitch_app/webui/fitch/fitch/wsgi.py
|
Python
|
gpl-3.0
| 385
|
#! /usr/bin/env python
# ==========================================================================
# This script performs unit tests for the ctools package
#
# Copyright (C) 2012-2017 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import gammalib
import ctools
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import test_ctobssim
import test_ctselect
import test_ctphase
import test_ctfindvar
import test_ctbin
import test_ctlike
import test_cttsmap
import test_ctmodel
import test_ctskymap
import test_ctexpcube
import test_ctpsfcube
import test_ctedispcube
import test_ctbkgcube
import test_ctmapcube
import test_ctcubemask
import test_ctbutterfly
import test_ctulimit
import test_cterror
import test_ctprob
import test_pipelines
# ========================= #
# Perform ctools unit tests #
# ========================= #
def test(installed=False):
"""
Perform unit testing for ctools.
Parameters
----------
installed : bool, optional
Flag indicating whether the script has been installed or not
"""
# If we have an installed version then create a temporary
# directory and copy over all information that is needed
if installed:
# Create temporary working directory
import tempfile
path = tempfile.mkdtemp()
os.chdir(path)
# Get test directory
import inspect
testdir = inspect.getfile(ctools.tests)
dirname = os.path.dirname(testdir)
# Copy test data in "data" directory
os.system('cp -r %s %s' % (dirname+'/data', 'data'))
# Set test data environment variable
os.environ['TEST_DATA'] = 'data'
# ... otherwise set the calibration database to the one shipped with the
# package; we don't need to set the 'TEST_DATA', this is done by the
# test environment
else:
os.environ['CALDB'] = '%s/caldb' % (os.environ['TEST_SRCDIR'])
# Create a local "pfiles" directory and set PFILES environment variable
try:
os.mkdir('pfiles')
except:
pass
os.environ['PFILES'] = 'pfiles'
# Copy the ctools parameter files into the "pfiles" directory. For a
# non-installed test we copy the parameter files from the respective
# source directories into the "pfiles" directory, for an installed version
# we get all parameter files from the "syspfiles" directory. Also make
# sure that all parameter files are writable.
if not installed:
os.system('cp -r %s/src/*/*.par pfiles/' % (os.environ['TEST_SRCDIR']))
os.system('chmod u+w pfiles/*')
else:
os.system('cp -r %s/syspfiles/*.par pfiles/' % (os.environ['CTOOLS']))
os.system('chmod u+w pfiles/*')
# Define list of test suites
tests = [test_ctobssim.Test(),
test_ctselect.Test(),
test_ctphase.Test(),
test_ctfindvar.Test(),
test_ctbin.Test(),
test_ctlike.Test(),
test_cttsmap.Test(),
test_ctmodel.Test(),
test_ctskymap.Test(),
test_ctexpcube.Test(),
test_ctpsfcube.Test(),
test_ctedispcube.Test(),
test_ctbkgcube.Test(),
test_ctmapcube.Test(),
test_ctcubemask.Test(),
test_ctbutterfly.Test(),
test_ctulimit.Test(),
test_cterror.Test(),
test_ctprob.Test(),
test_pipelines.Test()]
# Allocate test suite container
suites = gammalib.GTestSuites('ctools unit testing')
# Set test suites and append them to suite container
for suite in tests:
suite.set()
suites.append(suite)
# Run test suite
success = suites.run()
# Save test results
if not installed:
suites.save('reports/ctools.xml')
else:
suites.save('ctools_reports.xml')
    # Remove temporary directory
if installed:
os.system('rm -rf %s' % (path))
# Raise an exception in case of failure
if not success:
        raise RuntimeError('At least one error occurred during the test.')
# Return
return
# ======================== #
# Main routine entry point #
# ======================== #
if __name__ == '__main__':
# Run tests
test()
|
ctools/ctools
|
test/test_python_ctools.py
|
Python
|
gpl-3.0
| 4,967
|
from bisect import bisect_left
from lib import argmax, bin_search_left, yield_while
from math import inf
def search(arr): # binary search, length only. O(n\log n) time
st = [arr[0]] # st[i]: smallest tail of LIS of length i + 1. naturally sorted, and all elements are distinct
for x in arr:
if x > st[-1]: # if x is greater than the current smallest tail, then no need to search
st.append(x)
else:
st[bisect_left(st, x)] = x # returns the index of x if in st, or the index of the smallest element larger than x
return len(st)
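# Illustrative example (hand-traced, not part of the original file):
# search([3, 1, 4, 1, 5, 9, 2, 6]) == 4, with st ending up as [1, 2, 5, 6].
# Note that st itself is generally NOT an increasing subsequence of arr; only its
# length is meaningful, which is why this variant reports the length only.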
def search2(arr): # binary search with reconstruction. O(n\log n) time, O(n) space
st = [0] # st[i]: index (in arr) of the smallest tail of the LIS of length i + 1
bt = [-1] # bt[i]: index (in arr) of the predecessor of arr[i] in the LIS so far, or -1 if arr[i] is the head. when finished, len(bt) == len(arr)
for i, x in enumerate(arr[1:], start=1):
if x > arr[st[-1]]: # x is greater than the current smallest tail
bt.append(st[-1]) # point to the previous element of the current tail of st
st.append(i)
else:
pos = bin_search_left(st, x, key=lambda j: arr[j])
assert pos < len(st)
bt.append(st[pos - 1] if pos > 0 else -1) # pos == 0 -> arr[i] is the new head
st[pos] = i
return list(yield_while(st[-1], lambda x: x >= 0, lambda x: bt[x]))[::-1] # indices only
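# Illustrative example (hand-traced, assuming lib.bin_search_left / yield_while behave
# as their names suggest): search2([3, 1, 4, 1, 5, 9, 2, 6]) returns the index list
# [1, 2, 4, 7], i.e. the increasing subsequence of values [1, 4, 5, 6]. Other LIS of
# the same length (e.g. [3, 4, 5, 9]) would be equally valid answers.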
def search3(arr): # DP with reconstruction. O(n^2) time, O(n) space
dp = [1] # dp[i]: maximum length of increasing subsequence with arr[i] as tail
bt = [-1] # bt[i]: index (in arr) of the largest possible predecessor of arr[i], or -1 if arr[i] is the head
for i, x in enumerate(arr[1:], start=1):
m = -1 # m: in search for bt[i]
for j in range(i):
if arr[j] < x and (m == -1 or dp[j] > dp[m]):
# among all j < i s.t. arr[j] < arr[i], maximise dp[j]. if multiple such j exist, take the first one
m = j
if m == -1: # arr[i] as the start of dp new increasing subsequence
dp.append(1)
bt.append(-1)
else:
dp.append(dp[m] + 1)
bt.append(m)
return list(yield_while(argmax(dp), lambda s: s >= 0, lambda s: bt[s]))[::-1] # indices only
def search_triple(arr): # returns whether a triple i < j < k exists s.t. arr[i] < arr[j] < arr[k]
    fst, snd = inf, inf
for x in arr:
if x < fst:
fst = x
elif fst < x < snd:
snd = x
elif x > snd:
return True
return False
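# Illustrative example (not part of the original file):
# search_triple([5, 1, 5, 2, 5]) is True (values 1 < 2 < 5 at indices 1 < 3 < 4),
# while search_triple([5, 4, 3, 2, 1]) is False. fst tracks the smallest value seen
# so far and snd the smallest value that already has a smaller element before it.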
if __name__ == '__main__':
from random import shuffle
std_test = {(0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15): (0, 4, 6, 9, 13, 15),
(2, 3, 1, 4, 0, 4, 0, 3, 1, 4, 0): (6, 8, 9)}
for k, v in std_test.items():
assert search(k) == len(search2(k)) == len(v)
for _ in range(100):
rnd_test = list(range(50)) * 4
shuffle(rnd_test)
n = search(rnd_test)
bs = search2(rnd_test)
dp = search3(rnd_test)
assert n == len(bs) == len(dp)
for i in range(n - 1):
assert rnd_test[bs[i]] < rnd_test[bs[i + 1]]
assert rnd_test[dp[i]] < rnd_test[dp[i + 1]]
|
liboyin/algo-prac
|
arrays/longest_increasing_subsequence.py
|
Python
|
gpl-3.0
| 3,290
|
資料 = [1, 2, 3, 4, 5]
'''
program: list1.py
'''
print(資料[:3])
print(資料[2:])
print(資料[1:2])
a = [3, 5, 7, 11, 13]
for x in a:
if x == 7:
print('list contains 7')
break
print(list(range(10)))
for 索引 in range(-5, 6, 2):
print(索引)
squares = [ x*x for x in range(0, 11) ]
print(squares)
a = [10, 'sage', 3.14159]
b = a[:]
#list.pop([i]) removes and returns the element at index i; by default the last one
print(b.pop())
print(a)
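# An extra illustrative example: list.pop(0) removes and returns the first element,
# e.g. if a = [10, 'sage', 3.14159] then a.pop(0) returns 10 and leaves ['sage', 3.14159].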
數列 = [0]*10
print(數列)
'''
usage of del
'''
a = [1, 2, 3, 4]
print("刪除之前:", a)
del a[:2]
print("刪除之後:", a)
|
2014c2g5/2014cadp
|
wsgi/local_data/brython_programs/list1.py
|
Python
|
gpl-3.0
| 609
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-06 05:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20170105_0206'),
]
operations = [
migrations.AddField(
model_name='submission',
name='ques_ID',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='app.Question'),
preserve_default=False,
),
]
|
GDG-JSS-NOIDA/programmr
|
programmr/app/migrations/0003_submission_ques_id.py
|
Python
|
gpl-3.0
| 580
|
import json
import sys
if sys.version_info >= (3, 0):
from urllib.request import urlopen
else:
from urllib2 import urlopen
class Blocks:
def __init__(self):
self.version = None
self.block_hash = None
def load_info(self, block_number):
        raise NotImplementedError
def read_url(self, url):
try:
return urlopen(url).read().decode('utf-8')
except:
print('Error trying to read: ' + url +\
' / Try to open in a browser to see what the error is.')
sys.exit(0)
class Block_Toshi(Blocks):
def __init__(self):
TOSHI_API = 'https://bitcoin.toshi.io/api'
self.url = TOSHI_API + "/v0/blocks/{}"
def load_info(self, block_number):
json_block = json.loads(self.read_url(self.url.format(str(block_number))))
self.version = json_block['version']
self.block_hash = json_block['hash']
class Block_BlockR(Blocks):
def __init__(self):
BLOCKR_API = 'https://btc.blockr.io/api/'
self.url = BLOCKR_API + 'v1/block/info/{}'
def load_info(self, block_number):
json_block = json.loads(self.read_url(self.url.format(str(block_number))))
block_info = json_block['data']
self.version = block_info['version']
self.block_hash = block_info['hash']
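# Illustrative usage sketch (the block explorer endpoints above may no longer be online):
#   block = Block_BlockR()
#   block.load_info(125552)
#   print(block.version, block.block_hash)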
|
camponez/watch-blockchain
|
blocks.py
|
Python
|
gpl-3.0
| 1,341
|
from common import fileio
def main():
ADJ = 4
fileio.FILE_ADDRESS = "../../res/problem011/grid.txt"
matrix = fileio.readFileAsMatrix()
products = []
products.extend(checkVertical(matrix, ADJ))
products.extend(checkHorizontal(matrix, ADJ))
products.extend(checkDiagonal(matrix, ADJ))
products.extend(checkReverseDiagonal(matrix, ADJ))
print(max(products))
def checkVertical(matrix, adj):
productList = []
for i in range(len(matrix) - adj):
for j in range(len(matrix[i])):
product = 1
for row in range(adj):
product *= matrix[i + row][j]
productList.append(product)
return productList
def checkDiagonal(matrix, adj):
productList = []
for i in range(len(matrix) - adj):
for j in range(len(matrix[i]) - adj):
product = 1
for pos in range(adj):
product *= matrix[i + pos][j + pos]
productList.append(product)
return productList
def checkReverseDiagonal(matrix, adj):
productList = []
for i in range(len(matrix) - 1, -1, -1):
for j in range(len(matrix[i]) - adj):
product = 1
for pos in range(adj):
product *= matrix[i - pos][j + pos]
productList.append(product)
return productList
def checkHorizontal(matrix, adj):
productList = []
for i in range(len(matrix)):
for j in range(len(matrix[i]) - adj):
product = 1
for col in range(adj):
product *= matrix[i][j + col]
productList.append(product)
return productList
if __name__ == '__main__':
main()
|
ZachOhara/Project-Euler
|
python/p011_p020/problem011.py
|
Python
|
gpl-3.0
| 1,427
|
#!/usr/bin/env python
'''
Plot distribution of each feature,
conditioned on its bfeature type
'''
import argparse
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from common import *
from information import utils
from scipy.stats import itemfreq
nbins = 100
def opts():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('features', type=load_npz,
help='Training data features (npz)')
parser.add_argument('output',
help='Output file with plots (pdf)')
return parser
if __name__ == "__main__":
args = opts().parse_args()
pdf = PdfPages(args.output)
dfs = args.features['ifeatures']
cfs = args.features['ffeatures']
print "Plotting float features"
bfs = args.features['bfeatures']
u = utils.unique_rows(bfs)
indices = [np.all(bfs==ui, axis=-1) for ui in u]
for j, f in enumerate(cfs.T):
print "...ffeature %d" % j
fig = plt.figure()
h = np.zeros(nbins)
not_nan = f[np.logical_not(np.isnan(f))]
f_min = not_nan.min()
f_max = not_nan.max()
x = np.linspace(f_min, f_max, nbins)
dx = (f_max - f_min) / nbins
for idx in indices:
h_new, bins = np.histogram(f[idx], range=(f_min, f_max), bins=nbins)
plt.bar(x, h_new, bottom=h, width=dx)
h += h_new
plt.xlim(f_min, f_max)
plt.xlabel('f')
plt.ylabel('P(f)')
plt.title('FFeature %d. # NaN = %d' % (j, np.sum(np.isnan(f))))
pdf.savefig(fig)
plt.close()
print "Plotting integer features"
for j, x in enumerate(dfs.T):
print "...dfeature %d" % j
freq = itemfreq(x)
fig = plt.figure()
xu = np.sort(np.unique(x))
h = np.zeros_like(xu)
for idx in indices:
f = itemfreq(x[idx])
h_new = np.zeros_like(h)
h_new[f[:,0]] = f[:,1]
plt.bar(xu, h_new, bottom=h)
h += h_new
plt.xlabel('f')
plt.ylabel('P(f)')
plt.title('DFeature %d' % j)
pdf.savefig(fig)
plt.close()
pdf.close()
|
timpalpant/KaggleTSTextClassification
|
scripts/plot_feature_distributions.py
|
Python
|
gpl-3.0
| 2,179
|
import os
import datetime
import pymysql
import lib.maglib as MSG
#This is a library for preliminary processing of the scraped results
#It separates the scraped content, the author and the posting time
#The scrape results should be stored in the client root directory under the name "result"
#In the test setup the result file is results.txt
#Important global variables
PATH_SUFFIX = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
print(PATH_SUFFIX)
PATH_SUFFIX = PATH_SUFFIX[::-1]
PATH_SUFFIX = PATH_SUFFIX[PATH_SUFFIX.find('\\'):]
PATH_SUFFIX = PATH_SUFFIX[::-1]
print(PATH_SUFFIX)
PATH_RESULT_FILE = PATH_SUFFIX + "\\datasource.ini"
DBSETTINGS = {'H':'', #HOST
'U':'', #USER
'P':'', #PASSWORD
'D':''} #DATABASE_NAME
#This function reads the data-source configuration
#Return value: the raw configuration text on success
def loadDataSource():
print("加载数据源配置:",PATH_RESULT_FILE)
f = open(PATH_RESULT_FILE,'rb')
data = f.read()
f.close()
data = data.decode('gbk', 'ignore')
dbl = data.split("\r\n")
for db in dbl:
DBSETTINGS[db[0]] = db[db.find('=')+1:].replace('\'','').replace(' ','')
return data
loadDataSource()
DBCONN = pymysql.connect(host=DBSETTINGS['H'], port=3306,user=DBSETTINGS['U'],passwd=DBSETTINGS['P'],db=DBSETTINGS['D'],charset='UTF8')
DBCUR = DBCONN.cursor()
#Query the database for all records whose content contains the given word
#Return value: list of records containing that word
def queryWordContainPostListbyKeyword(word):
SEL = "select CONTENT from `postdata` where CONTENT like('%" + word +"%')"
DBCUR.execute("SET names 'utf8mb4'")
DBCUR.execute(SEL)
DBCONN.commit()
datalist = DBCUR.fetchall()
return datalist
#Query the database for all post records by the given author
#Return value: all reply records for that author
# [ [thread URL, forum name, author, post content, post time, reply-to user, page], [......], ..... ]
def queryPostdataListbyAuthor(author):
SEL = "select * from `postdata` where AUTHOR=\"" + author +"\""
DBCUR.execute("SET names 'utf8mb4'")
DBCUR.execute(SEL)
DBCONN.commit()
datalist = DBCUR.fetchall()
return datalist
#Query the database for the latest date
#Return value: the single latest date
def queryDatasourceLatestTime():
SEL = "select MAX(DATE) from `postdata`"
DBCUR.execute("SET names 'utf8mb4'")
DBCUR.execute(SEL)
DBCONN.commit()
datalist = DBCUR.fetchall()
return datalist[0][0]
#Query the database for the earliest date
#Return value: the single earliest date
def queryDatasourceEarlyTime():
SEL = "select MIN(DATE) from `postdata`"
DBCUR.execute("SET names 'utf8mb4'")
DBCUR.execute(SEL)
DBCONN.commit()
datalist = DBCUR.fetchall()
return datalist[0][0]
#Query the database for the given author's records after the given date
#Return value: list of records after that date
# [ [thread URL, forum name, author, post content, post time, reply-to user, page], [......], ..... ]
def queryPostdataListAfterTime(author,earlydatestr):
SEL = "select * from `postdata` where AUTHOR=\"" + author + "\" and DATE>'" + earlydatestr + "'"
DBCUR.execute("SET names 'utf8mb4'")
DBCUR.execute(SEL)
DBCONN.commit()
datalist = DBCUR.fetchall()
print(len(datalist))
return datalist
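#Illustrative usage sketch, assuming the column order described in the comments above:
#    for row in queryPostdataListbyAuthor("some_user"):
#        thread_url, forum, author, content, date = row[0], row[1], row[2], row[3], row[4]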
|
ankanch/tieba-zhuaqu
|
DSV-user-application-plugin-dev-kit/lib/result_functions_file.py
|
Python
|
gpl-3.0
| 3,240
|
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2014-2015 Jérémy Bobbio <lunar@debian.org>
# © 2015 Reiner Herrmann <reiner@reiner-h.de>
# © 2012-2013 Olivier Matz <zer0@droids-corp.org>
# © 2012 Alan De Smet <adesmet@cs.wisc.edu>
# © 2012 Sergey Satskiy <sergey.satskiy@gmail.com>
# © 2012 scito <info@scito.ch>
#
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
#
#
# Most of the code is borrowed from diff2html.py available at:
# http://git.droids-corp.org/?p=diff2html.git
#
# Part of the code is inspired by diff2html.rb from
# Dave Burt <dave (at) burt.id.au> (mainly for html theme)
#
import base64
import codecs
import collections
import contextlib
import hashlib
import html
import io
import logging
import os
import re
import sys
from urllib.parse import urlparse
from diffoscope import VERSION
from diffoscope.config import Config
from diffoscope.diff import SideBySideDiff, DIFFON, DIFFOFF
from ..icon import FAVICON_BASE64
from ..utils import sizeof_fmt, PrintLimitReached, DiffBlockLimitReached, \
Presenter, make_printer, PartialString
from . import templates
# minimum line size, we add a zero-sized breakable space every
# LINESIZE characters
LINESIZE = 20
TABSIZE = 8
# Characters we're willing to word wrap on
WORDBREAK = " \t;.,/):-"
JQUERY_SYSTEM_LOCATIONS = (
'/usr/share/javascript/jquery/jquery.js',
)
logger = logging.getLogger(__name__)
re_anchor_prefix = re.compile(r'^[^A-Za-z]')
re_anchor_suffix = re.compile(r'[^A-Za-z-_:\.]')
def send_and_exhaust(iterator, arg, default):
"""Send a single value to a coroutine, exhaust it, and return the final
element or a default value if it was empty."""
# Python's coroutine syntax is still a bit rough when you want to do
# slightly more complex stuff. Watch this logic closely.
output = default
try:
output = iterator.send(arg)
except StopIteration:
pass
for output in iterator:
pass
return output
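# Illustrative calling pattern (not part of the original file):
#   gen = some_coroutine_that_already_yielded_once()   # hypothetical generator
#   last = send_and_exhaust(gen, new_value, default=None)
# i.e. push one value into the coroutine, let it run to completion, and keep its
# final yield (or the default if it produced nothing more).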
def md5(s):
return hashlib.md5(s.encode('utf-8')).hexdigest()
def escape_anchor(val):
"""
ID and NAME tokens must begin with a letter ([A-Za-z]) and may be followed
by any number of letters, digits ([0-9]), hyphens ("-"), underscores ("_"),
colons (":"), and periods (".").
"""
for pattern, repl in (
(re_anchor_prefix, 'D'),
(re_anchor_suffix, '-'),
):
val = pattern.sub(repl, val)
return val
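# Illustrative example: escape_anchor('1 usr/bin, x') -> 'D-usr-bin--x'
# (the leading non-letter becomes 'D', every other disallowed character becomes '-').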
def output_diff_path(path):
return ' / '.join(n.source1 for n in path[1:])
def output_anchor(path):
return escape_anchor(output_diff_path(path))
def convert(s, ponct=0, tag=''):
i = 0
t = io.StringIO()
for c in s:
# used by diffs
if c == DIFFON:
t.write('<%s>' % tag)
elif c == DIFFOFF:
t.write('</%s>' % tag)
# special highlighted chars
elif c == "\t" and ponct == 1:
n = TABSIZE - (i % TABSIZE)
if n == 0:
n = TABSIZE
t.write('<span class="diffponct">\xbb</span>'+'\xa0'*(n-1))
elif c == " " and ponct == 1:
t.write('<span class="diffponct">\xb7</span>')
elif c == "\n" and ponct == 1:
t.write('<br/><span class="diffponct">\</span>')
elif ord(c) < 32:
conv = u"\\x%x" % ord(c)
t.write('<em>%s</em>' % conv)
i += len(conv)
else:
t.write(html.escape(c))
i += 1
if WORDBREAK.count(c) == 1:
t.write('\u200b')
i = 0
if i > LINESIZE:
i = 0
t.write('\u200b')
return t.getvalue()
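# Illustrative example: convert('a' + DIFFON + 'b' + DIFFOFF + 'c', tag='ins')
# yields 'a<ins>b</ins>c'; with ponct=1, tabs, spaces and newlines are additionally
# rendered as visible punctuation marks.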
def output_visual(visual, path, indentstr, indentnum):
logger.debug('including image for %s', visual.source)
indent = tuple(indentstr * (indentnum + x) for x in range(3))
anchor = output_anchor(path)
return u"""{0[0]}<div class="difference">
{0[1]}<div class="diffheader">
{0[1]}<div class="diffcontrol">⊟</div>
{0[1]}<div><span class="source">{1}</span>
{0[2]}<a class="anchor" href="#{2}" name="{2}">\xb6</a>
{0[1]}</div>
{0[1]}</div>
{0[1]}<div class="difference"><img src=\"data:{3},{4}\" alt=\"compared images\" /></div>
{0[0]}</div>""".format(indent, html.escape(visual.source), anchor, visual.data_type, visual.content)
def output_node_frame(difference, path, indentstr, indentnum, body):
indent = tuple(indentstr * (indentnum + x) for x in range(3))
anchor = output_anchor(path)
dctrl_class, dctrl = ("diffcontrol", u'⊟') if difference.has_visible_children() else ("diffcontrol-nochildren", u'⊡')
if difference.source1 == difference.source2:
header = u"""{0[1]}<div class="{1}">{2}</div>
{0[1]}<div><span class="diffsize">{3}</span></div>
{0[1]}<div><span class="source">{5}</span>
{0[2]}<a class="anchor" href="#{4}" name="{4}">\xb6</a>
{0[1]}</div>
""".format(indent, dctrl_class, dctrl, sizeof_fmt(difference.size()), anchor,
html.escape(difference.source1))
else:
header = u"""{0[1]}<div class="{1} diffcontrol-double">{2}</div>
{0[1]}<div><span class="diffsize">{3}</span></div>
{0[1]}<div><span class="source">{5}</span> vs.</div>
{0[1]}<div><span class="source">{6}</span>
{0[2]}<a class="anchor" href="#{4}" name="{4}">\xb6</a>
{0[1]}</div>
""".format(indent, dctrl_class, dctrl, sizeof_fmt(difference.size()), anchor,
html.escape(difference.source1),
html.escape(difference.source2))
return PartialString.numl(u"""{0[1]}<div class="diffheader">
{1}{0[1]}</div>
{2}""", 3).pformatl(indent, header, body)
def output_node(ctx, difference, path, indentstr, indentnum):
"""Returns a tuple (parent, continuation) where
- parent is a PartialString representing the body of the node, including
its comments, visuals, unified_diff and headers for its children - but
not the bodies of the children
- continuation is either None or (only in html-dir mode) a function which
when called with a single integer arg, the maximum size to print, will
print any remaining "split" pages for unified_diff up to the given size.
"""
indent = tuple(indentstr * (indentnum + x) for x in range(3))
t, cont = PartialString.cont()
comments = u""
if difference.comments:
comments = u'{0[1]}<div class="comment">\n{1}{0[1]}</div>\n'.format(
indent, "".join(u"{0[2]}{1}<br/>\n".format(indent, html.escape(x)) for x in difference.comments))
visuals = u""
for visual in difference.visuals:
visuals += output_visual(visual, path, indentstr, indentnum+1)
udiff = u""
ud_cont = None
if difference.unified_diff:
ud_cont = HTMLSideBySidePresenter().output_unified_diff(
ctx, difference.unified_diff, difference.has_internal_linenos)
udiff = next(ud_cont)
if isinstance(udiff, PartialString):
ud_cont = ud_cont.send
udiff = udiff.pformatl(PartialString.of(ud_cont))
else:
for _ in ud_cont:
pass # exhaust the iterator, avoids GeneratorExit
ud_cont = None
# PartialString for this node
body = PartialString.numl(u"{0}{1}{2}{-1}", 3, cont).pformatl(comments, visuals, udiff)
if len(path) == 1:
# root node, frame it
body = output_node_frame(difference, path, indentstr, indentnum, body)
t = cont(t, body)
# Add holes for child nodes
for d in difference.details:
child = output_node_frame(d, path + [d], indentstr, indentnum+1, PartialString.of(d))
child = PartialString.numl(u"""{0[1]}<div class="difference">
{1}{0[1]}</div>
{-1}""", 2, cont).pformatl(indent, child)
t = cont(t, child)
assert len(t.holes) >= len(difference.details) + 1 # there might be extra holes for the unified diff continuation
return cont(t, u""), ud_cont
def output_header(css_url, our_css_url=False, icon_url=None):
if css_url:
css_link = u' <link href="%s" type="text/css" rel="stylesheet" />\n' % css_url
else:
css_link = u''
if our_css_url:
css_style = u' <link href="%s" type="text/css" rel="stylesheet" />\n' % our_css_url
else:
css_style = u'<style type="text/css">\n' + templates.STYLES + u'</style>\n'
if icon_url:
favicon = icon_url
else:
favicon = u'data:image/png;base64,' + FAVICON_BASE64
return templates.HEADER % {
'title': html.escape(' '.join(sys.argv)),
'favicon': favicon,
'css_link': css_link,
'css_style': css_style
}
def output_footer(jquery_url=None):
footer = templates.FOOTER % {'version': VERSION}
if jquery_url:
return templates.SCRIPTS % {'jquery_url': html.escape(jquery_url)} + footer
return footer
@contextlib.contextmanager
def file_printer(directory, filename):
with codecs.open(os.path.join(directory, filename), 'w', encoding='utf-8') as f:
yield f.write
@contextlib.contextmanager
def spl_file_printer(directory, filename, accum):
with codecs.open(os.path.join(directory, filename), 'w', encoding='utf-8') as f:
print_func = f.write
def recording_print_func(s):
print_func(s)
recording_print_func.bytes_written += len(s)
accum.bytes_written += len(s)
recording_print_func.bytes_written = 0
yield recording_print_func
class HTMLPrintContext(collections.namedtuple("HTMLPrintContext",
"target single_page jquery_url css_url our_css_url icon_url")):
@property
def directory(self):
return None if self.single_page else self.target
class HTMLSideBySidePresenter(object):
supports_visual_diffs = True
def __init__(self):
self.max_lines = Config().max_diff_block_lines # only for html-dir
self.max_lines_parent = Config().max_page_diff_block_lines
self.max_page_size_child = Config().max_page_size_child
def new_unified_diff(self):
self.spl_rows = 0
self.spl_current_page = 0
self.spl_print_func = None
self.spl_print_ctrl = None
# the below apply to child pages only, the parent page limit works
# differently and is controlled by output_difference later below
self.bytes_max_total = 0
self.bytes_written = 0
self.error_row = None
def output_hunk_header(self, hunk_off1, hunk_size1, hunk_off2, hunk_size2):
self.spl_print_func(u'<tr class="diffhunk"><td colspan="2">Offset %d, %d lines modified</td>' % (hunk_off1, hunk_size1))
self.spl_print_func(u'<td colspan="2">Offset %d, %d lines modified</td></tr>\n' % (hunk_off2, hunk_size2))
def output_line(self, has_internal_linenos, type_name, s1, line1, s2, line2):
self.spl_print_func(u'<tr class="diff%s">' % type_name)
try:
if s1:
if has_internal_linenos:
self.spl_print_func(u'<td colspan="2" class="diffpresent">')
else:
self.spl_print_func(u'<td class="diffline">%d </td>' % line1)
self.spl_print_func(u'<td class="diffpresent">')
self.spl_print_func(convert(s1, ponct=1, tag='del'))
self.spl_print_func(u'</td>')
else:
self.spl_print_func(u'<td colspan="2">\xa0</td>')
if s2:
if has_internal_linenos:
self.spl_print_func(u'<td colspan="2" class="diffpresent">')
else:
self.spl_print_func(u'<td class="diffline">%d </td>' % line2)
self.spl_print_func(u'<td class="diffpresent">')
self.spl_print_func(convert(s2, ponct=1, tag='ins'))
self.spl_print_func(u'</td>')
else:
self.spl_print_func(u'<td colspan="2">\xa0</td>')
finally:
self.spl_print_func(u"</tr>\n")
def spl_print_enter(self, print_context, rotation_params):
# Takes ownership of print_context
self.spl_print_ctrl = print_context.__exit__, rotation_params
self.spl_print_func = print_context.__enter__()
ctx, _ = rotation_params
# Print file and table headers
self.spl_print_func(output_header(ctx.css_url, ctx.our_css_url, ctx.icon_url))
def spl_had_entered_child(self):
return self.spl_print_ctrl and self.spl_print_ctrl[1] and self.spl_current_page > 0
def spl_print_exit(self, *exc_info):
if not self.spl_had_entered_child():
return False
self.spl_print_func(output_footer())
_exit, _ = self.spl_print_ctrl
self.spl_print_func = None
self.spl_print_ctrl = None
return _exit(*exc_info)
def check_limits(self):
if not self.spl_print_ctrl[1]:
# html-dir single output, don't need to rotate
if self.spl_rows >= self.max_lines_parent:
raise DiffBlockLimitReached()
return False
else:
# html-dir output, perhaps need to rotate
if self.spl_rows >= self.max_lines:
raise DiffBlockLimitReached()
if self.spl_current_page == 0: # on parent page
if self.spl_rows < self.max_lines_parent:
return False
logger.debug("new unified-diff subpage, parent page went over %s lines", self.max_lines_parent)
else: # on child page
if self.bytes_max_total and self.bytes_written > self.bytes_max_total:
raise PrintLimitReached()
if self.spl_print_func.bytes_written < self.max_page_size_child:
return False
logger.debug("new unified-diff subpage, previous subpage went over %s bytes", self.max_page_size_child)
return True
def new_child_page(self):
_, rotation_params = self.spl_print_ctrl
ctx, mainname = rotation_params
self.spl_current_page += 1
filename = "%s-%s.html" % (mainname, self.spl_current_page)
if self.spl_current_page > 1:
# previous page was a child, close it
self.spl_print_func(templates.UD_TABLE_FOOTER % {"filename": html.escape(filename), "text": "load diff"})
self.spl_print_func(u"</table>\n")
self.spl_print_exit(None, None, None)
# rotate to the next child page
context = spl_file_printer(ctx.directory, filename, self)
self.spl_print_enter(context, rotation_params)
self.spl_print_func(templates.UD_TABLE_HEADER)
def output_limit_reached(self, limit_type, total, bytes_processed):
logger.debug('%s print limit reached', limit_type)
bytes_left = total - bytes_processed
self.error_row = templates.UD_TABLE_LIMIT_FOOTER % {
"limit_type": limit_type,
"bytes_left": bytes_left,
"bytes_total": total,
"percent": (bytes_left / total) * 100
}
self.spl_print_func(self.error_row)
def output_unified_diff_table(self, unified_diff, has_internal_linenos):
"""Output a unified diff <table> possibly over multiple pages.
It is the caller's responsibility to set up self.spl_* correctly.
Yields None for each extra child page, and then True or False depending
on whether the whole output was truncated.
"""
try:
ydiff = SideBySideDiff(unified_diff)
for t, args in ydiff.items():
if t == "L":
self.output_line(has_internal_linenos, *args)
elif t == "H":
self.output_hunk_header(*args)
elif t == "C":
self.spl_print_func(u'<td colspan="2">%s</td>\n' % args)
else:
raise AssertionError()
self.spl_rows += 1
if not self.check_limits():
continue
self.new_child_page()
new_limit = yield None
if new_limit:
self.bytes_max_total = new_limit
self.bytes_written = 0
self.check_limits()
wrote_all = True
except GeneratorExit:
return
except DiffBlockLimitReached:
self.output_limit_reached("diff block lines", len(unified_diff), ydiff.bytes_processed)
wrote_all = False
except PrintLimitReached:
self.output_limit_reached("report size", len(unified_diff), ydiff.bytes_processed)
wrote_all = False
finally:
# no footer on the last page, just a close tag
self.spl_print_func(u"</table>")
yield wrote_all
def output_unified_diff(self, ctx, unified_diff, has_internal_linenos):
self.new_unified_diff()
rotation_params = None
if ctx.directory:
mainname = md5(unified_diff)
rotation_params = ctx, mainname
try:
udiff = io.StringIO()
udiff.write(templates.UD_TABLE_HEADER)
self.spl_print_func = udiff.write
self.spl_print_ctrl = None, rotation_params
it = self.output_unified_diff_table(unified_diff, has_internal_linenos)
wrote_all = next(it)
if wrote_all is None:
assert self.spl_current_page == 1
# now pause the iteration and wait for consumer to give us a
# size-limit to write the remaining pages with
# exhaust the iterator and save the last item in wrote_all
new_limit = yield PartialString(PartialString.escape(udiff.getvalue()) + u"{0}</table>\n", None)
wrote_all = send_and_exhaust(it, new_limit, wrote_all)
else:
yield udiff.getvalue()
return
except GeneratorExit:
logger.debug("skip extra output for unified diff %s", mainname)
it.close()
self.spl_print_exit(None, None, None)
return
except:
import traceback
traceback.print_exc()
if self.spl_print_exit(*sys.exc_info()) is False:
raise
else:
self.spl_print_exit(None, None, None)
finally:
self.spl_print_ctrl = None
self.spl_print_func = None
truncated = not wrote_all
child_rows_written = self.spl_rows - self.max_lines_parent
if truncated and not child_rows_written:
# if we didn't write any child rows, just output the error message
# on the parent page
parent_last_row = self.error_row
else:
noun = "pieces" if self.spl_current_page > 1 else "piece"
text = "load diff (%s %s%s)" % (self.spl_current_page, noun, (", truncated" if truncated else ""))
parent_last_row = templates.UD_TABLE_FOOTER % {"filename": html.escape("%s-1.html" % mainname), "text": text}
yield self.bytes_written, parent_last_row
class HTMLPresenter(Presenter):
supports_visual_diffs = True
def __init__(self):
self.reset()
def reset(self):
self.report_printed = 0
self.report_limit = Config().max_report_size
@property
def report_remaining(self):
return self.report_limit - self.report_printed
def maybe_print(self, node, printers, outputs, continuations):
output = outputs[node]
node_cont = continuations[node]
if output.holes and set(output.holes) - set(node_cont):
return
# could be slightly more accurate, whatever
est_placeholder_len = max(
len(templates.UD_TABLE_FOOTER),
len(templates.UD_TABLE_LIMIT_FOOTER),
) + 40
est_size = output.size(est_placeholder_len)
results = {}
for cont in node_cont:
remaining = self.report_remaining - est_size
printed, result = cont(remaining)
self.report_printed += printed
results[cont] = result
out = output.format(results)
printer_args = printers[node]
with printer_args[0](*printer_args[1:]) as printer:
printer(out)
self.report_printed += len(out)
del outputs[node]
del printers[node]
del continuations[node]
def output_node_placeholder(self, pagename, lazy_load, size=0):
if lazy_load:
return templates.DIFFNODE_LAZY_LOAD % {
"pagename": pagename,
"pagesize": sizeof_fmt(Config().max_page_size_child),
"size": sizeof_fmt(size),
}
else:
return templates.DIFFNODE_LIMIT
def output_difference(self, ctx, root_difference):
outputs = {} # nodes to their partial output
ancestors = {} # child nodes to ancestor nodes
placeholder_len = len(self.output_node_placeholder(
"XXXXXXXXXXXXXXXX",
not ctx.single_page,
))
continuations = {} # functions to print unified diff continuations (html-dir only)
printers = {} # nodes to their printers
def smallest_first(node, parent_score):
depth = parent_score[0] + 1 if parent_score else 0
parents = parent_score[3] if parent_score else []
# Difference is not comparable so use memory address in event of a tie
return depth, node.size_self(), id(node), parents + [node]
def process_node(node, score):
path = score[3]
diff_path = output_diff_path(path)
pagename = md5(diff_path)
logger.debug('html output for %s', diff_path)
ancestor = ancestors.pop(node, None)
assert ancestor in path or (ancestor is None and node is root_difference)
node_output, node_continuation = output_node(ctx, node, path, " ", len(path)-1)
add_to_existing = False
if ancestor:
page_limit = Config().max_page_size if ancestor is \
root_difference else Config().max_page_size_child
page_current = outputs[ancestor].size(placeholder_len)
report_current = self.report_printed + \
sum(p.size(placeholder_len) for p in outputs.values())
want_to_add = node_output.size(placeholder_len)
logger.debug(
"report size: %s/%s, page size: %s/%s, want to add %s)",
report_current,
self.report_limit,
page_current,
page_limit,
want_to_add,
)
if report_current + want_to_add > self.report_limit:
make_new_subpage = False
elif page_current + want_to_add < page_limit:
add_to_existing = True
else:
make_new_subpage = not ctx.single_page
if add_to_existing:
# under limit, add it to an existing page
outputs[ancestor] = outputs[ancestor].pformat({
node: node_output,
})
stored = ancestor
else:
# over limit (or root), new subpage or continue/break
if ancestor:
placeholder = self.output_node_placeholder(
pagename,
make_new_subpage,
node.size(),
)
outputs[ancestor] = outputs[ancestor].pformat({node: placeholder})
self.maybe_print(ancestor, printers, outputs, continuations)
footer = output_footer()
# we hit a limit, either max-report-size or single-page
if not make_new_subpage:
if outputs:
# True = don't traverse this node's children,
# because they won't be output however there are
# holes in other pages, so don't break the loop
# just yet
return True
# No more holes, don't traverse any more nodes
raise StopIteration
else:
# Unconditionally write the root node regardless of limits
assert node is root_difference
footer = output_footer(ctx.jquery_url)
pagename = "index"
outputs[node] = node_output.frame(
output_header(ctx.css_url, ctx.our_css_url, ctx.icon_url) +
u'<div class="difference">\n', u'</div>\n' + footer)
assert not ctx.single_page or node is root_difference
printers[node] = (make_printer, ctx.target) if ctx.single_page \
else (file_printer, ctx.target, "%s.html" % pagename)
stored = node
for child in node.details:
logger.debug(
"scheduling future html output for: %s",
output_diff_path(path + [child]),
)
ancestors[child] = stored
conts = continuations.setdefault(stored, [])
if node_continuation:
conts.append(node_continuation)
self.maybe_print(stored, printers, outputs, continuations)
nodes = root_difference.traverse_heapq(smallest_first, yield_score=True)
prune_prev_node_descendants = None
while True:
try:
node, score = nodes.send(prune_prev_node_descendants)
prune_prev_node_descendants = process_node(node, score)
except StopIteration:
break
if outputs:
import pprint
pprint.pprint(outputs, indent=4)
assert not outputs
def ensure_jquery(self, jquery_url, basedir, default_override):
if jquery_url is None:
jquery_url = default_override
default_override = None # later, we can detect jquery_url was None
if jquery_url == 'disable' or not jquery_url:
return None
url = urlparse(jquery_url)
if url.scheme or url.netloc:
# remote path
return jquery_url
# local path
if os.path.isabs(url.path):
check_path = url.path
else:
check_path = os.path.join(basedir, url.path)
if os.path.lexists(check_path):
return url.path
for path in JQUERY_SYSTEM_LOCATIONS:
if os.path.exists(path):
os.symlink(path, check_path)
logger.debug('jquery found at %s and symlinked to %s', path, check_path)
return url.path
if default_override is None:
# if no jquery_url was given, and we can't find it, don't use it
return None
logger.warning('--jquery given, but jQuery was not found. Using it regardless.')
logger.debug('Locations searched: %s', ', '.join(JQUERY_SYSTEM_LOCATIONS))
return url.path
def output_html_directory(self, directory, difference, css_url=None, jquery_url=None):
"""
Multi-file presenter. Writes to a directory, and puts large diff tables
into files of their own.
This uses jQuery. By default it uses
/usr/share/javascript/jquery/jquery.js (symlinked, so that you can
still share the result over HTTP). You can also pass --jquery URL to
diffoscope to use a central jQuery copy.
"""
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.isdir(directory):
raise ValueError("%s is not a directory" % directory)
jquery_url = self.ensure_jquery(jquery_url, directory, "jquery.js")
with open(os.path.join(directory, "common.css"), "w") as fp:
fp.write(templates.STYLES)
with open(os.path.join(directory, "icon.png"), "wb") as fp:
fp.write(base64.b64decode(FAVICON_BASE64))
ctx = HTMLPrintContext(directory, False, jquery_url, css_url, "common.css", "icon.png")
self.output_difference(ctx, difference)
def output_html(self, target, difference, css_url=None, jquery_url=None):
"""
Default presenter, all in one HTML file
"""
jquery_url = self.ensure_jquery(jquery_url, os.getcwd(), None)
ctx = HTMLPrintContext(target, True, jquery_url, css_url, None, None)
self.output_difference(ctx, difference)
@classmethod
def run(cls, data, difference, parsed_args):
cls().output_html(
parsed_args.html_output,
difference,
css_url=parsed_args.css_url,
jquery_url=parsed_args.jquery_url,
)
class HTMLDirectoryPresenter(HTMLPresenter):
@classmethod
def run(cls, data, difference, parsed_args):
cls().output_html_directory(
parsed_args.html_output_directory,
difference,
css_url=parsed_args.css_url,
jquery_url=parsed_args.jquery_url,
)
|
ReproducibleBuilds/diffoscope
|
diffoscope/presenters/html/html.py
|
Python
|
gpl-3.0
| 30,030
|
import view
from world import *
DEFAULT_CAMERA_AREA = view.View.CAMERA_AREA
class LevelZero(view.View):
lives = 50
name = "Level 0: Grossini's return"
target = 5
order = 0
CAMERA_AREA=(0, -90, 0, -90)
mountainScale = 0.4
def setup_level(self):
self.step = 0.15
self.energyRegenCoef = 100
self.world.add_active( Generator((0,10), 10) )
self.world.add_passive( Segment(-10,20,10,20) )
self.world.add_passive( Goal(60,20,15.) )
self.world.add_passive( Floor(-200) )
class LevelOne(view.View):
lives = 50
name = "Level 1: Easy for Grossini"
target = 5
order = 1
CAMERA_AREA=(200, -150, -200, 150)
mountainScale = 0.6
def setup_level(self):
self.step = 0.15
self.energyRegenCoef = 100
self.world.add_active( Generator((0,10), 10 ) )
self.world.add_passive( Segment(-10,20,10,20) )
self.world.add_passive( Goal(0,60,15.) )
self.world.add_passive( Floor(-200) )
class LevelTwo(view.View):
lives = 50
name = "Level 2: Grossini can do it"
target = 10
order =2
CAMERA_AREA=(400, -300, -400, 300)
mountainScale = 0.8
def setup_level(self):
self.step = 0.20
self.energyRegenCoef = 50
self.world.add_active( Generator((0,10), 10) )
self.world.add_passive( Segment(-100,20,100,20) )
self.world.add_passive( Goal(0,60,15.) )
self.world.add_passive( Floor(-200) )
class LevelThree(view.View):
lives = 20
name = "Level 3: Grossini goes through clouds"
order =3
target = 10
CAMERA_AREA=(550, -500, -550, 400)
def setup_level(self):
self.step = 0.20
self.energyRegenCoef = 50
self.world.add_active( Generator((0,10)) )
self.world.add_passive( Segment(-100,20,100,20) )
self.world.add_passive( LimitedLifeSegment(-100,20,0,100, life=5) )
self.world.add_passive( LimitedLifeSegment(0,100,100,20, life=5) )
self.world.add_passive( Goal(0,60,15.) )
self.world.add_passive( Floor(-200) )
class LevelFour(view.View):
lives = 20
name = "Level 4: Grossini, the clouds and the ceiling"
target = 10
order = 4
def setup_level(self):
self.step = 0.15
self.energyRegenCoef = 50
self.world.add_active( Generator((0,10)) )
self.world.add_passive( Segment(-100,20,100,20) )
self.world.add_passive( Goal(0,60,15.) )
self.world.add_passive( Floor(-200) )
self.world.add_passive( Ceiling(20) )
class LevelFive(view.View):
lives = 30
name = "Level 5: Grossini, use the force"
target = 5
order = 5
def setup_level(self):
self.step = 0.15
self.energyRegenCoef = 10
self.world.add_active( Generator((0,10)) )
self.world.add_passive( Segment(-100,20,100,20) )
self.world.add_attractor( Attractor(-100,20, force=-15) )
self.world.add_attractor( Attractor(5,-50, force=2) )
self.world.add_attractor( Attractor(30,-70, force=1) )
self.world.add_attractor( Attractor(60,-75, force=1) )
self.world.add_attractor( Attractor(100,20, force=-15) )
self.world.add_passive( Goal(0,60,15.) )
self.world.add_passive( Floor(-200) )
class LevelSix(view.View):
lives = 30
name = "Level 6: Grossini and Grossini"
target = 5
order = 6
def setup_level(self):
self.step = 0.125
self.energyRegenCoef = 10
self.world.add_active( Generator((-55,10)) )
self.world.add_attractor( Attractor(-50,-20, force=1) )
self.world.add_active( Generator((55,10)) )
self.world.add_attractor( Attractor(50,-20, force=1) )
self.world.add_passive( Segment(-100,100,100,100) )
self.world.add_passive( Goal(0,150,15.) )
self.world.add_passive( Floor(-200) )
class LevelSeven(view.View):
lives = 40
name = "Level 7: Grossinis, the force, and Grossini"
target = 10
order = 7
def setup_level(self):
self.step = 0.15
self.energyRegenCoef = 10
self.world.add_active( Generator((-125,10)) )
self.world.add_active( Generator((125,10)) )
self.world.add_passive( Segment(-100,100,100,100) )
self.world.add_attractor( Attractor(-125,-50, force=-2) )
self.world.add_attractor( Attractor(-90,-70, force=3) )
self.world.add_attractor( Attractor(-25,-75, force=4) )
self.world.add_attractor( Attractor(25,-75, force=4) )
self.world.add_attractor( Attractor(90,-70, force=3) )
self.world.add_attractor( Attractor(125,-50, force=-2) )
self.world.add_passive( Goal(0,150,15.) )
self.world.add_passive( Floor(-200) )
class LevelEight(view.View):
lives = 100
name = "Level 666: Flacid Wand's Revenge"
target = 20
order = 8
textoWin = "Game finished!"
def setup_level(self):
self.step = 0.15
self.energyRegenCoef = 10
self.world.add_active( Generator((-75,10)) )
self.world.add_active( Generator((75,10)) )
self.world.add_passive( Segment(-150,100,150,100) )
self.world.add_passive( Goal(0,150,15.) )
self.world.add_passive( Floor(-200) )
self.world.add_passive( Ceiling(150) )
w = 50
xbase = -150
xstep = 150
ybase = -200
ystep = 30
for x in range(3):
for y in range(10):
self.world.add_passive( LimitedLifeSegment(
xbase+xstep*x-w,
ybase+ystep*y,
xbase+xstep*x+w,
ybase+ystep*y,
life=2) )
import types
levels = []
def cmp(l1,l2):
return l1.order.__cmp__(l2.order)
for name, klass in locals().items():
if type(klass) is types.ClassType and issubclass(klass, view.View):
levels.append( klass )
levels.sort(cmp)
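# Chain the sorted levels so that finishing one leads to the next; the last
# level hands off to the Win screen.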
for n in range(len(levels)-1):
levels[n].nextLevel = levels[n+1]
import separador
levels[-1].nextLevel = separador.Win
|
italomaia/turtle-linux
|
games/gHell/lib/levels.py
|
Python
|
gpl-3.0
| 6,134
|
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os, sys
try:
import cPickle as pickle
except:
import pickle
#connect echoRD Tools
pathdir='../echoRD' #path to echoRD
lib_path = os.path.abspath(pathdir)
#sys.path.append(lib_path)
sys.path.append('/home/ka/ka_iwg/ka_oj4748/echoRD/echoRD')
import vG_conv as vG
from hydro_tools import plotparticles_t,hydroprofile,plotparticles_specht
# Prepare echoRD
#connect to echoRD
import run_echoRD as rE
#connect and load project
[dr,mc,mcp,pdyn,cinf,vG]=rE.loadconnect(pathdir='../',mcinif='mcini_gen2',experimental=True)
mc = mcp.mcpick_out(mc,'gen_test2.pickle')
runname='gen_test2123'
mc.inimf='07moist.dat'
mc.advectref='Shipitalo'
mc.soilmatrix=pd.read_csv(mc.matrixbf, sep=' ')
mc.soilmatrix['m'] = np.fmax(1-1/mc.soilmatrix.n,0.1)
mc.md_macdepth=mc.md_depth[np.fmax(2,np.sum(np.ceil(mc.md_contact),axis=1).astype(int))]
mc.md_macdepth[mc.md_macdepth<=0.]=0.065
precTS=pd.read_csv(mc.precf, sep=',',skiprows=3)
precTS.tstart=60
precTS.tend=60+1800
precTS.total=0.06
precTS.intense=precTS.total/(precTS.tend-precTS.tstart)
#use modified routines for binned retention definitions
mc.part_sizefac=500
mc.gridcellA=mc.mgrid.vertfac*mc.mgrid.latfac
mc.particleA=abs(mc.gridcellA.values)/(2*mc.part_sizefac) #assume average ks at about 0.5 as reference of particle size
mc.particleD=2.*np.sqrt(mc.particleA/np.pi)
mc.particleV=3./4.*np.pi*(mc.particleD/2.)**3.
mc.particleV/=np.sqrt(abs(mc.gridcellA.values)) #assume grid size as 3rd dimension
mc.particleD/=np.sqrt(abs(mc.gridcellA.values))
mc.particlemass=dr.waterdensity(np.array(20),np.array(-9999))*mc.particleV #assume 20C as reference for particle mass
#DEBUG: a) we assume 2D=3D; b) change 20C to annual mean T?
mc=dr.ini_bins(mc)
mc=dr.mc_diffs(mc,np.max(np.max(mc.mxbin)))
[mc,particles,npart]=dr.particle_setup(mc)
#define bin assignment mode for infiltration particles
mc.LTEdef='instant'#'ks' #'instant' #'random'
mc.LTEmemory=mc.soilgrid.ravel()*0.
#new reference
mc.maccon=np.where(mc.macconnect.ravel()>0)[0] #index of all connected cells
mc.md_macdepth=np.abs(mc.md_macdepth)
mc.prects=False
#theta=mc.zgrid[:,1]*0.+0.273
#[mc,particles,npart]=rE.particle_setup_obs(theta,mc,vG,dr,pdyn)
[thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
#[A,B]=plotparticles_t(particles,thS/100.,mc,vG,store=True)
# Run Model
mc.LTEpercentile=70 #new parameter
t_end=24.*3600.
saveDT=True
#1: MDA
#2: MED
#3: rand
infiltmeth='MDA'
#3: RWdiff
#4: Ediss
#exfiltmeth='RWdiff'
exfiltmeth='Ediss'
#5: film_uconst
#6: dynamic u
film=True
#7: maccoat1
#8: maccoat10
#9: maccoat100
macscale=1. #scale the macropore coating
clogswitch=False
infiltscale=False
#mc.dt=0.11
#mc.splitfac=5
#pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
#import profile
#%prun -D diff_pd_prof.prof pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
wdir='/beegfs/work/ka_oj4748/gen_tests'
drained=pd.DataFrame(np.array([]))
leftover=0
output=60. #mind to set also in TXstore.index definition
dummy=np.floor(t_end/output)
t=0.
ix=0
TSstore=np.zeros((int(dummy),mc.mgrid.cells[0],2))
try:
#unpickle:
with open(''.join([wdir,'/results/Z',runname,'_Mstat.pick']),'rb') as handle:
pickle_l = pickle.load(handle)
dummyx = pickle.loads(pickle_l)
particles = pickle.loads(dummyx[0])
[leftover,drained,t,TSstore,ix] = pickle.loads(dummyx[1])
ix+=1
print('resuming into stored run at t='+str(t)+'...')
except:
print('starting new run...')
#loop through plot cycles
for i in np.arange(dummy.astype(int))[ix:]:
plotparticles_specht(particles,mc,pdyn,vG,runname,t,i,saving=True,relative=False,wdir=wdir)
[particles,npart,thS,leftover,drained,t]=rE.CAOSpy_rundx1(i*output,(i+1)*output,mc,pdyn,cinf,precTS,particles,leftover,drained,6.,splitfac=4,prec_2D=False,maccoat=macscale,saveDT=saveDT,clogswitch=clogswitch,infilt_method=infiltmeth,exfilt_method=exfiltmeth,film=film,infiltscale=infiltscale)
TSstore[i,:,:]=rE.part_store(particles,mc)
#if i/5.==np.round(i/5.):
with open(''.join([wdir,'/results/Z',runname,'_Mstat.pick']),'wb') as handle:
pickle.dump(pickle.dumps([pickle.dumps(particles),pickle.dumps([leftover,drained,t,TSstore,i])]), handle, protocol=2)
|
cojacoo/testcases_echoRD
|
gen_test2123.py
|
Python
|
gpl-3.0
| 4,416
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import cm
import numpy as np# reshape
from cstoolkit import drange
from matplotlib.colors import LinearSegmentedColormap
"""
cmap_cs_precp = [ (242, 242, 242), (191, 239, 255), (178, 223, 238),
(154, 192, 205), ( 0, 235, 235), ( 0, 163, 247),
(153, 255, 51),( 0, 255, 0), ( 0, 199, 0), ( 0, 143, 0),
( 0, 63, 0), (255, 255, 0),(255, 204, 0) , (255, 143, 0),
(255, 0, 0), (215, 0, 0),
(255, 0, 255) ] #, (155, 87, 203)]
"""
cmap_cs_precp = [ (242, 242, 242), (178, 223, 238), (154, 192, 205), (68, 176, 213),
( 0, 163, 247), ( 0, 235, 235), (153, 255, 51 ), ( 0, 255, 0),
( 0, 199, 0), ( 0, 143, 0), ( 0, 63, 0), (255, 255, 0),
( 255, 204, 0), (255, 143, 0), (255, 0, 0), (215, 0, 0),
(255, 0, 255) ] #, (155, 87, 203)]
WBGYR=[#(255,255,255),
#(252,254,255),
#(250,253,255),
#(247,252,254),
#(244,251,254),
#(242,250,254),
#(239,249,254),
#(236,248,253),
#(234,247,253),
#(231,246,253),
#(229,245,253),
#(226,244,253),
#(223,243,252),
#(221,242,252),
#(218,241,252),
#(215,240,252),
#(213,239,252),
#(210,238,251),
#(207,237,251),
#(205,236,251),
#(202,235,251),
#(199,234,250),
#(197,233,250),
#(194,232,250),
#(191,231,250),
#(189,230,250),
#(186,229,249),
(183,228,249),
(181,227,249),
(178,226,249),
(176,225,249),
(173,224,248),
(170,223,248),
(168,222,248),
(165,221,248),
(162,220,247),
(157,218,247),
(155,216,246),
(152,214,245),
(150,212,243),
(148,210,242),
(146,208,241),
(143,206,240),
(141,204,238),
(139,202,237),
(136,200,236),
(134,197,235),
(132,195,234),
(129,193,232),
(127,191,231),
(125,189,230),
(123,187,229),
(120,185,228),
(118,183,226),
(116,181,225),
(113,179,224),
(111,177,223),
(109,175,221),
(106,173,220),
(104,171,219),
(102,169,218),
(100,167,217),
(97,165,215),
(95,163,214),
(93,160,213),
(90,158,212),
(88,156,211),
(86,154,209),
(83,152,208),
(81,150,207),
(79,148,206),
(77,146,204),
(72,142,202),
(72,143,198),
(72,144,195),
(72,145,191),
(72,146,188),
(72,147,184),
(72,148,181),
(72,149,177),
(72,150,173),
(72,151,170),
(72,153,166),
(72,154,163),
(72,155,159),
(72,156,156),
(72,157,152),
(72,158,148),
(72,159,145),
(72,160,141),
(72,161,138),
(73,162,134),
(73,163,131),
(73,164,127),
(73,165,124),
(73,166,120),
(73,167,116),
(73,168,113),
(73,169,109),
(73,170,106),
(73,172,102),
(73,173,99),
(73,174,95),
(73,175,91),
(73,176,88),
(73,177,84),
(73,178,81),
(73,179,77),
(73,181,70),
(78,182,71),
(83,184,71),
(87,185,72),
(92,187,72),
(97,188,73),
(102,189,74),
(106,191,74),
(111,192,75),
(116,193,75),
(121,195,76),
(126,196,77),
(130,198,77),
(135,199,78),
(140,200,78),
(145,202,79),
(150,203,80),
(154,204,80),
(159,206,81),
(164,207,81),
(169,209,82),
(173,210,82),
(178,211,83),
(183,213,84),
(188,214,84),
(193,215,85),
(197,217,85),
(202,218,86),
(207,220,87),
(212,221,87),
(217,222,88),
(221,224,88),
(226,225,89),
(231,226,90),
(236,228,90),
(240,229,91),
(245,231,91),
(250,232,92),
(250,229,91),
(250,225,89),
(250,222,88),
(249,218,86),
(249,215,85),
(249,212,84),
(249,208,82),
(249,205,81),
(249,201,80),
(249,198,78),
(249,195,77),
(248,191,75),
(248,188,74),
(248,184,73),
(248,181,71),
(248,178,70),
(248,174,69),
(248,171,67),
(247,167,66),
(247,164,64),
(247,160,63),
(247,157,62),
(247,154,60),
(247,150,59),
(247,147,58),
(246,143,56),
(246,140,55),
(246,137,53),
(246,133,52),
(246,130,51),
(246,126,49),
(246,123,48),
(246,120,47),
(245,116,45),
(245,113,44),
(245,106,41),
(244,104,41),
(243,102,41),
(242,100,41),
(241,98,41),
(240,96,41),
(239,94,41),
(239,92,41),
(238,90,41),
(237,88,41),
(236,86,41),
(235,84,41),
(234,82,41),
(233,80,41),
(232,78,41),
(231,76,41),
(230,74,41),
(229,72,41),
(228,70,41),
(228,67,40),
(227,65,40),
(226,63,40),
(225,61,40),
(224,59,40),
(223,57,40),
(222,55,40),
(221,53,40),
(220,51,40),
(219,49,40),
(218,47,40),
(217,45,40),
(217,43,40),
(216,41,40),
(215,39,40),
(214,37,40),
(213,35,40),
(211,31,40),
(209,31,40),
(207,30,39),
(206,30,39),
(204,30,38),
(202,30,38),
(200,29,38),
(199,29,37),
(197,29,37),
(195,29,36),
(193,28,36),
(192,28,36),
(190,28,35),
(188,27,35),
(186,27,34),
(185,27,34),
(183,27,34),
(181,26,33),
(179,26,33),
(178,26,32),
(176,26,32),
(174,25,31),
(172,25,31),
(171,25,31),
(169,25,30),
(167,24,30),
(165,24,29),
(164,24,29),
(162,23,29),
(160,23,28),
(158,23,28),
(157,23,27),
(155,22,27),
(153,22,27),
(151,22,26),
(150,22,26),
(146,21,25)]
hotcold18= [( 24 , 24 ,112),
( 16 , 78 ,139),
( 23 ,116 ,205),
( 72 ,118 ,255),
( 91 ,172 ,237),
( 173 ,215 ,230),
( 209 ,237 ,237),
( 229 ,239 ,249),
#( 242 ,255 ,255),
( 255 ,255 ,255),
#( 253 ,245 ,230),
( 255 ,228 ,180),
( 243 ,164 , 96),
( 237 ,118 , 0),
( 205 ,102 , 29),
( 224 , 49 , 15),
#( 255, 0 , 0),
( 255, 0 , 255),
(183,75,243),
(183,75,243)]
#(255,0,255)] #,
#(81,9,121)]
"""
( 237 , 0 , 0),
( 205 , 0 , 0),
( 139 , 0 , 0)]
"""
haxby= [ (37,57,175) ,
(37,68,187) ,
(38,79,199) ,
(38,90,211) ,
(39,101,223) ,
(39,113,235) ,
(40,124,247) ,
(41,134,251) ,
(43,144,252) ,
(44,154,253) ,
(46,164,253) ,
(47,174,254) ,
(49,184,255) ,
(54,193,255) ,
(62,200,255) ,
(71,207,255) ,
(80,214,255) ,
(89,221,255) ,
(98,229,255) ,
(107,235,254) ,
(112,235,241) ,
(117,235,228) ,
(122,235,215) ,
(127,236,202) ,
(132,236,189) ,
(137,236,177) ,
(147,238,172) ,
(157,241,171) ,
(168,244,169) ,
(178,247,167) ,
(189,250,165) ,
(200,253,163) ,
(208,253,159) ,
(213,250,152) ,
(219,247,146) ,
(224,244,139) ,
(230,241,133) ,
(236,238,126) ,
(240,235,120) ,
(243,227,115) ,
(245,220,109) ,
(248,212,104) ,
(250,205,98) ,
(252,197,93) ,
(255,190,88) ,
(255,185,84) ,
(255,181,81) ,
(255,176,78) ,
(255,172,75) ,
(255,167,72) ,
(255,163,69) ,
(255,163,74) ,
(255,167,85) ,
(255,171,95) ,
(255,175,105) ,
(255,179,115) ,
(255,183,126) ,
(255,189,139) ,
(255,200,158) ,
(255,211,178) ,
(255,222,197) ,
(255,233,216) ,
(255,244,236) ,
(255,255,255) ]
BWR=[ ( 36 , 0 , 216),
( 24 , 28 , 247),
( 40 , 87 , 255),
( 61 , 135 , 255),
( 86 , 176 , 255),
( 117 , 211 , 255),
( 153 , 234 , 255),
( 188 , 249 , 255),
( 234 , 255 , 255),
( 255 , 255 , 255),
( 255 , 241 , 188),
( 255 , 214 , 153),
( 255 , 172 , 117),
( 255 , 120 , 86),
( 255 , 61 , 61),
#( 247 , 39 , 53),
( 165 , 0 , 33)]
"""
( 216 , 21 , 47),
( 165 , 0 , 33)]
"""
BWR=[ #( 0 , 0 , 0),
( 16 , 78 , 139),
#( 23 , 116 , 205),
#( 61 , 135 , 255),
( 86 , 176 , 255),
( 117 , 211 , 255),
( 153 , 234 , 255),
( 188 , 249 , 255),
( 234 , 255 , 255),
( 255 , 255 , 255),
( 255 , 241 , 188),
( 255 , 214 , 153),
( 255 , 172 , 117),
( 255 , 120 , 86),
( 255 , 61 , 61),
( 165 , 0 , 33)]
#( 247 , 39 , 53)]
tableau20 = [ (127, 127, 127),(174, 199, 232), (31, 119, 180), (255, 187, 120),
(214, 39, 40),(152, 223, 138), (44, 160, 44), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (255, 127, 14),(199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229),(65,68,81),(0,0,0)]
def buildcmp(cmaplist):
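    """Scale a list of 0-255 RGB tuples to 0-1 floats (in place) and return a
    discrete LinearSegmentedColormap built from them plus the scaled list."""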
for i in range(len(cmaplist)):
r, g, b = cmaplist[i]
cmaplist[i] = (r / 255., g / 255., b / 255.)
return LinearSegmentedColormap.from_list( "precip", cmaplist,N=len(cmaplist)),cmaplist
cmap_cs_precp,cs_precp_list=buildcmp(cmap_cs_precp)
cmap_haxby,haxby_list=buildcmp(haxby[::5])
cmap_BWR,BWR_list=buildcmp(BWR)
cmap_BWR.set_over('purple')
cmap_BWR.set_under('blue')
cmap_cs_precp.set_over('purple')
cmap_tableau20,tableau20=buildcmp(tableau20)
cmap_hotcold18,hotcold18=buildcmp(hotcold18)
cmap_hotcold18.set_over('blueviolet')
cmap_hotcold18.set_under('black')
cmap_WBGYR,WBGYR=buildcmp(WBGYR)
sim_nicename={"ERI":"ERI",
"ERI_CAR":"CWRF-CAR",
"cor":"Inter-annual cor of",
"Xcor":"Cor between ori of",
"Xcorbias":"Cor between bias of",
"RegCM":"RegCM4.6",
"ConRatio":"Conv to Total Pr in PCT95 day",
"PCT":"Extreme Precipitation",
"RAINYDAYS":"Days with Precipitation",
"NX":"North Xinjiang",
"SX":"South Xinjiang",
"WT":"West Tibet",
"ET":"East Tibet",
"ST":"South Tibet",
"IM":"Inner Mongolia",
"SW":"Southwest",
"NE":"Northeast",
"NC":"North China",
"CC":"Central China",
"SC":"South China",
"T2MAX":"T2X",
"AT2M" :"T2M",
"T2MIN":"T2N",
"PRAVG":"PR",
"AT2M97":"A97",
"SDII":"DI",
"CN_OBS":"OBS",
#"RAINYDAYS":"RD",
"run_RegCM4.6":"RegCM\n4.6",
"run_RegCM4.5":"RegCM\n4.5",
"ERI_run_0":"old CWRF",
"new_ERI_run_0":"CWRF\nMor",
"new_ERI_gsfc":"CWRF",
"new_ERI_albedo":"CWRF",
# "new_ERI_gsfc":"CWRF\nGSFC",
"new_ERI_morr":"Mor",
"run_00":"CTL",
"xoml":"new_xoml",
"run_01":"BMJ",
"run_02":"NKF",
"run_03":"NSAS",
"run_04":"TDK",
"run_06":"MB",
"run_06":"THO",
"run_07":"MOR",
"run_08":"WD6",
"run_09":"AER",
"run_10": "XR", # "Radall",
"run_11":"CCCMA",
"run_12":"FLG",
"run_13":"RRTMG",
"run_14":"MYNN",
"run_15":"ACM",
"run_16":"UW",
"run_17":"NOAH",
"run_18":"XOML",
"run_19":"F-M",
"run_20":"FMBMJ",
"run_21":"FMNKF",
"run_22":"FMNSAS",
"run_23":"FMTDK",
"run_24":"FMMB",#"scheme_cst_2",
"run_25":"FMTHO",#"scheme_cst_3",
"run_26":"FMMOR",#"scheme_cst_3",
"run_27":"boulac",#"scheme_ccb_1",
"run_28":"gfs",#"scheme_ccb_4",
"run_29":"mynn2",#"scheme_ccb_5",
"run_30":"new cloud",#"scheme_ccs_3",
"run_31":"boulac", #"NewTHO",
"run_32":"gfs2", #"NewMOR",
"run_33":"", #"NewMOR",
"run_34":"New Melt", #"NewMOR",
"run_35":"old CAM", #"NewMOR",
"run_36":"NewSW", #"NewMOR",
"run_37":"ACM", #"NewMOR",
"run_38":"bedrock", #"NewMOR",
"run_39":"CF", #"NewMOR",
"run_40":"NewDrain V0", #"NewMOR",
"run_41":"Warm start V1", #"NewMOR",
"run_42":"Cold start V1", #"NewMOR",
"run_43":"inflx ", #"NewMOR",
"run_44":"om ", #"NewMOR",
"run_45":"New Soil Water", #"NewMOR",
"run_46":"New Reff", #"NewMOR",
"run_47":"OISST", #"NewMOR",
"run_48":"NOoml", #"NewMOR",
"run_49":"NOocean", #"NewMOR",
"run_50":"MSA_newSW", #"ERIsst"
"run_51":"NoMSA ipf0", #"NewMOR",
"run_52":"new UWCAM", #"NewMOR",
"run_53":"NoMSA ipf2", #"NewMOR",
"run_54":"AERO_MSAon", #"NewMOR",
"run_55":"AERO_MSAold", #"NewMOR",
"run_56":"noAERO", #"NewMOR",
"run_57":"OBC_V0", #"SVNcode", #"NewMOR",
"run_58":"OBClg100", #"NewMOR",
"run_59":"OBClg111", #"NewMOR",
"run_60":"WRF", #"NewMOR",
"run_61":"ALBfix", #"NewMOR",
"run_62":"PSFC4_NSW", #"NewMOR",
"run_63":"PSFC4_OSW", #"NewMOR",
"run_64":"psfc4_osw_CAMUW", #"NewMOR",
"run_65":"git558faed", #"NewMOR",
"run_66":"psfc4morr", #"NewMOR",
"run_67":"newsw_morr", #"NewMOR",
"run_68":"psfc4_osw_v2", #"NewMOR",
"run_69":"WRFRUN", #
"run_70":"PSFC4_NSW", #oldini0
"run_71":"PSFC4_V0", #"PSFC4_SVNCODE"
"run_72":"OBC_OSW" , #"oldBC_osw"
"run_73":"PSFC4_br_OSW" , #"oldBC_osw"
"run_74":"OLDini_br_NSW" , #"oldBC_osw"
"run_75":"OLDini_br_V0" , #"oldBC_osw"
"run_76":"OLDini_br_558faed" , #"oldBC_osw"
"run_77":"OVEG_NSW" , #"oldBC_osw"
"run_78":"OVEG_OSW" , #"oldBC_osw"
"run_79":"OVEG_V0" , #"oldBC_osw"
"run_80":"HydRED" , #"oldBC_osw"
"run_81":"CTL" , #"oldBC_osw"
"run_82":"newcam" , #"oldBC_osw"
"run_oldSW_flw8_new":"CWRF",
"ERI_run_1":"CWRF/CLM4.5",
"CESM_run_0":"CWRF/CSSP",
"CESM_run_1":"CWRF/CLM4.5",
"PCR85-CESM_run_0":"CWRF/CSSP",
"PCR85-CESM_run_1":"CWRF/CLM4.5",
"run_CTL":"CTL ",
"CESM":"CESM",
"run_CLM4.5":"CLM4.5Hyd ",
"run_Red":"HydRed ",
"run_noxoml":"NO xoml ",
"run_nolake":"NO lake ",
"run_oldrad" :"Old Alb ",
"run_oldveg":"Old LAI ",
"run_noforzen":"Old frozen ",
"Mean":"Mean",
"Mean_Sub":"Mean_Sub",
"Med":"Med",
"P85":"P85",
"P80":"P80",
"P70":"P70",
"P10":"P10",
"P20":"P20",
"Obs":"OBS",
"OBS":"OBS",
"Max":"Max",
"run_1":"MY/MO/W1.5/MC0.5/TD0",
"run_2":"CAM/GSFC/W1.5/MC0.75/TD0",
"run_3":"MY/MO/W1.5/MC0.75/TD0",
"run_4":"MY/MO/W1/MC0.75/TD0",
"run_5":"MY/MO/W1/MC0.75/TD0.5",
"run_6":"MY/MO/W1/MC1/TD0",
"run_7":"MY/MO/W1/MC1/TD1"}
#plotres={'PRAVG':{},'PCT':{},'CDD':{},'RAINYDAYS':{},'AT2M':{},'ASWUPT':{}}
from collections import defaultdict
plotres= defaultdict(dict)
##########################set the plot related parameters#####################
#plotres['XRSUR']['cleve1']=[x*1e-6 for x in range(31)]
plotres['XRSUR']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['XRSUR']['cmp2']=cmp
#plotres['XRSUR']['convertcoef']=0.001
plotres['XRSUR']['unit']="kg/m2/day"
plotres['XRSUR']['mask']=True
plotres['XRSUR']['violion']=False
#plotres['XRBAS']['cleve1']=[x*1e-6 for x in range(31)]
plotres['XRBAS']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['XRBAS']['cmp2']=cmp
plotres['XRBAS']['unit']="kg/m2/day"
plotres['XRBAS']['mask']=True
plotres['XRBAS']['violion']=False
#plotres['SFROFF']['cleve1']=[x*10000 for x in range(31)]
plotres['SFROFF']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['SFROFF']['cmp2']=cmp
#plotres['SFROFF']['convertcoef']=0.001
plotres['SFROFF']['unit']="kg/m2"
plotres['SFROFF']['mask']=True
plotres['SFROFF']['violion']=False
#plotres['XSMTg']['cleve1']=[x*20 for x in range(1,20)] #range(0, 1,0.05)
plotres['XSMTg']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['XSMTg']['cmp2']=cmp
plotres['XSMTg']['unit']="kg/m2"
plotres['XSMTg']['mask']=True
plotres['XSMTg']['violion']=False
plotres['XSMTg']['vlevel']=4
#plotres['AODNIR']['cleve0']=[x*0.05 for x in range(0,11)] #range(0, 1,0.05)
#plotres['AODNIR']['cleve1']=[x*0.05 for x in range(0,11)] #range(0, 1,0.05)
plotres['AODNIR']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['AODNIR']['cmp2']=cmp
#plotres['AODNIR']['convertcoef']=0.01
plotres['AODNIR']['unit']=""
plotres['AODNIR']['mask']=True
#plotres['AODVIS']['cleve0']=[x*0.05 for x in range(0,11)] #range(0, 1,0.05)
#plotres['AODVIS']['cleve1']=[x*0.05 for x in range(0,11)] #range(0, 1,0.05)
plotres['AODVIS']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['AODVIS']['cmp2']=cmp
#plotres['AODVIS']['convertcoef']=0.01
plotres['AODVIS']['unit']=""
plotres['AODVIS']['mask']=True
#plotres['CLDFRAh']['cleve1']=[x*0.05 for x in range(0,21)] #range(0, 1,0.05)
plotres['CLDFRAh']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['CLDFRAh']['cmp2']=cmp
#plotres['CLDFRAh']['convertcoef']=0.01
plotres['CLDFRAh']['unit']=""
plotres['CLDFRAh']['mask']=True
plotres['CLDFRAh']['violion']=False
plotres['CLDFRAh']['vlevel']=3
#plotres['CLDFRAm']['cleve1']=[x*0.05 for x in range(0,21)] #range(0, 1,0.05)
plotres['CLDFRAm']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['CLDFRAm']['cmp2']=cmp
#plotres['CLDFRAm']['convertcoef']=0.01
plotres['CLDFRAm']['unit']=""
plotres['CLDFRAm']['mask']=True
plotres['CLDFRAm']['violion']=False
plotres['CLDFRAm']['vlevel']=2
#plotres['CLDFRAl']['cleve1']=[x*0.05 for x in range(0,21)] #range(0, 1,0.05)
plotres['CLDFRAl']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['CLDFRAl']['cmp2']=cmp
#plotres['CLDFRAl']['convertcoef']=0.01
plotres['CLDFRAl']['unit']=""
plotres['CLDFRAl']['mask']=True
plotres['CLDFRAl']['violion']=False
plotres['CLDFRAl']['vlevel']=1
#plotres['CLDFRA']['cleve1']=[x*0.05 for x in range(0,21)] #range(0, 1,0.05)
plotres['CLDFRA']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['CLDFRA']['cmp2']=cmp
#plotres['CLDFRA']['convertcoef']=0.01
plotres['CLDFRA']['unit']=""
plotres['CLDFRA']['mask']=True
plotres['CLDFRA']['violion']=False
plotres['CLDFRA']['vlevel']=0
#plotres['QVAPOR']['cleve1']=range(0, 20,1)
plotres['QVAPOR']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['QVAPOR']['cmp2']=cmp
plotres['QVAPOR']['convertcoef']=1000
plotres['QVAPOR']['unit']="$g/kg$"
plotres['QVAPOR']['mask']=False
plotres['QVAPOR']['violion']=False
plotres['QVAPOR']['vlevel']=21
#plotres['TCWPC']['cleve1']=range(0, 200,10)
plotres['TCWPC']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['TCWPC']['cmp2']=cmp
plotres['TCWPC']['unit']="$g/m^{2}$"
plotres['TCWPC']['mask']=True
plotres['TCWPC']['violion']=False
#plotres['V']['cleve1']=range(-10, 10,1)
plotres['V']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['V']['cmp2']=cmp
plotres['V']['unit']="$m/s$"
plotres['V']['mask']=False
plotres['V']['violion']=False
plotres['V']['vlevel']=21
#plotres['U']['cleve1']=range(-10, 10,1)
plotres['U']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['U']['cmp2']=cmp
plotres['U']['unit']="$m/s$"
plotres['U']['mask']=False
plotres['U']['violion']=False
plotres['U']['vlevel']=21
#plotres['PSL']['cleve1']=range(1000, 1024,1)
plotres['PSL']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['PSL']['cmp2']=cmp
plotres['PSL']['unit']="$\%$"
plotres['PSL']['convertcoef']=0.01
plotres['PSL']['mask']=False
plotres['PSL']['violion']=False
#plotres['PS']['cleve1']=range(700, 1030,5)
plotres['PS']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['PS']['cmp2']=cmp
plotres['PS']['unit']="$\%$"
plotres['PS']['convertcoef']=0.01
plotres['PS']['mask']=False
plotres['PS']['violion']=False
#plotres['ALBEDO']['cleve1']=range(0, 60,5)
plotres['ALBEDO']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['ALBEDO']['cmp2']=cmp
plotres['ALBEDO']['unit']="$\%$"
plotres['ALBEDO']['convertcoef']=100
plotres['ALBEDO']['mask']=False
plotres['ALBEDO']['violion']=False
#plotres['ASWUPT']['cleve1']=range(80,160,10)
plotres['ASWUPT']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['ASWUPT']['cmp2']=cmp
plotres['ASWUPT']['unit']="$W m^{-2}$"
plotres['ASWUPT']['mask']=True
plotres['ASWUPT']['violion']=False
#plotres['ASWUPS']['cleve1']=range(0,210,10)
plotres['ASWUPS']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['ASWUPS']['cmp2']=cmp
plotres['ASWUPS']['unit']="$W m^{-2}$"
plotres['ASWUPS']['mask']=True
plotres['ASWUPS']['violion']=False
#plotres['ALWDNS']['cleve1']=range(20,410,50)
#plotres['ALWDNS']['cleve0']=range(20,410,10)
plotres['ALWDNS']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['ALWDNS']['cmp2']=cmp
plotres['ALWDNS']['unit']="$W m^{-2}$"
plotres['ALWDNS']['mask']=True
plotres['ALWDNS']['violion']=False
#plotres['ASWDNS']['cleve1']=range(20,410,50)
#plotres['ASWDNS']['cleve0']=range(20,410,10)
plotres['ASWDNS']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['ASWDNS']['cmp2']=cmp
plotres['ASWDNS']['unit']="$W m^{-2}$"
plotres['ASWDNS']['mask']=True
plotres['ASWDNS']['violion']=False
#plotres['ALWUPS']['cleve1']=range(200,510,10)
plotres['ALWUPS']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['ALWUPS']['cmp2']=cmp
plotres['ALWUPS']['unit']="$W m^{-2}$"
plotres['ALWUPS']['mask']=True
plotres['ALWUPS']['violion']=False
#plotres['ALWDNS']['cleve1']=range(150,450,10)
plotres['ALWDNS']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['ALWDNS']['cmp2']=cmp
plotres['ALWDNS']['unit']="$W m^{-2}$"
plotres['ALWDNS']['mask']=True
plotres['ALWDNS']['violion']=False
#plotres['ALWUPT']['cleve1']=range(150,360,10)
plotres['ALWUPT']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['ALWUPT']['cmp2']=cmp
plotres['ALWUPT']['unit']="$W m^{-2}$"
plotres['ALWUPT']['mask']=True
plotres['ALWUPT']['violion']=False
#plotres['PrMAX']['cleve0']=range(1,35)
#plotres['PrMAX']['cleve1']=range(0,51,5)
# import colormaps as cmaps
# cmp=cmap=cmaps.viridis
plotres['PrMAX']['cmp1']=plt.get_cmap('jet')
#plotres['PrMAX']['cmp1']=cm.s3pcpn
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['PrMAX']['cmp2']=cmp
plotres['PrMAX']['unit']="mm/day"
plotres['PrMAX']['convertcoef']=60*60*24
plotres['PrMAX']['mask']=True
plotres['PrMAX']['violion']=True
#plotres['PRAVG']['cleve1']=[0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5,6,7,8,9,10,11,12,13,14]
#plotres['PRAVG']['cleve3']=range(10)
plotres['PRAVG']['cmp1']=cmap_cs_precp
cmp =plt.get_cmap('Spectral_r');cmp.set_over('maroon');cmp.set_under('w')
plotres['PRAVG']['cmp3']=plt.get_cmap('RdYlBu_r') #cmap_WBGYR #plt.get_cmap('jet')
cmp =cmap_BWR
plotres['PRAVG']['cmp2']=cmp
plotres['PRAVG']['unit']="mm/day"
plotres['PRAVG']['violion']=True
#plotres['R95T']['cleve1']=[x*0.04 for x in range(0,21)] #range(0, 1,0.05)
#plotres['R95T']['cleve0']=[x*0.04 for x in range(0,21)] #range(0, 1,0.05)
plotres['R95T']['cmp1']=plt.get_cmap('jet')
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['R95T']['cmp2']=cmp
plotres['R95T']['unit']=""
plotres['R95T']['convertcoef']=1
#plotres['PCT']['cleve0']=[0,2,4,6,8,10,15,20,25,30,40,50,60]
#plotres['PCT']['cleve1']=[2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,35,40,45,50]
plotres['PCT']['cmp1']=cmap_cs_precp
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('w')
plotres['PCT']['cmp2']=cmp
plotres['PCT']['unit']="mm/day"
plotres['PCT']['convertcoef']=1
plotres['ConRatio']['cmp1']=cmap_cs_precp
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('w')
plotres['ConRatio']['cmp2']=cmp
plotres['ConRatio']['unit']=""
#plotres['PCT99']['cleve0']=[0,2,4,6,8,10,15,20,25,30,40,50,60]
#plotres['PCT99']['cleve1']=[2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,35,40,45,50]
plotres['PCT99']['cmp1']=cmap_cs_precp
cmp =plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('w')
plotres['PCT99']['cmp2']=cmp
plotres['PCT99']['unit']="mm/day"
plotres['PCT99']['convertcoef']=1
#plotres['CDD']['cleve0']=[-20,-18,-16,-14,-10,-8,-6,-4,-2,2,4,6,8,10,12,14,16,18,20,22]
#plotres['CDD']['cleve1']=[4,6,8,10,12,14,16,18,20,22,24,26,28,30,35,40,45,50]
plotres['CDD']['cmp1']=cmap_cs_precp
plotres['CDD']['cmp2']=None
plotres['CDD']['unit']="day"
plotres['CDD']['convertcoef']=1
plotres['CDD']['mask']=True
#plotres['SDII']['cleve0']=range(1,15)
#plotres['SDII']['cleve1']=range(1,20)
plotres['SDII']['cmp1']=cmap_cs_precp
plotres['SDII']['cmp2']=None
plotres['SDII']['unit']="mm/day"
plotres['SDII']['convertcoef']=1
plotres['SDII']['mask']=True
#plotres['R5D']['cleve0']=[2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,35,40,45,50]
#plotres['R5D']['cleve1']=[2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,35,40,45,50]
plotres['R5D']['cmp1']=cmap_cs_precp
plotres['R5D']['cmp2']=None
plotres['R5D']['unit']="mm/day"
plotres['R5D']['convertcoef']=1 # divided by 5 days
plotres['R5D']['mask']=True
#plotres['R10']['cleve0']=[2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,35,40,45,50]
#plotres['R10']['cleve1']=[2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,35,40,45,50]
plotres['R10']['cmp1']=cmap_cs_precp
plotres['R10']['cmp2']=None
plotres['R10']['unit']="day"
plotres['R10']['convertcoef']=1
plotres['R10']['mask']=True
#plotres['RAINYDAYS']['cleve0']=range(5,95,5)
#plotres['RAINYDAYS']['cleve1']=range(5,95,5)
plotres['RAINYDAYS']['cmp1']=cmap_cs_precp
plotres['RAINYDAYS']['cmp2']=None
plotres['RAINYDAYS']['unit']="day"
plotres['RAINYDAYS']['convertcoef']=1
plotres['RAINYDAYS']['mask']=True
#plotres['T2MAX']['cleve1']=range(-10,41)
#plotres['T2MAX']['cleve0']=[-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7]
#plotres['T2MAX']['cmp1']=plt.get_cmap('jet')
plotres['T2MAX']['cmp1']=cmap_cs_precp
plotres['T2MAX']['cmp1']=plt.get_cmap('jet')
cmp =cmap_BWR
plotres['T2MAX']['cmp2']=cmp
plotres['T2MAX']['unit']="$^\circ$C"
plotres['T2MAX']['convertcoef']=1
plotres['T2MAX']['mask']=True
plotres['T2MAX']['valuemask']=True
plotres['T2MAX']['shift']=-273.15
#plotres['T2MIN']['cleve1']=range(-10,41)
#plotres['T2MIN']['cleve0']=[-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7]
#plotres['T2MIN']['cmp1']=plt.get_cmap('jet')
#plotres['T2MIN']['cmp1']=cmap_cs_precp
plotres['T2MIN']['cmp1']=plt.get_cmap('jet')
cmp =cmap_BWR
plotres['T2MIN']['cmp2']=cmp
plotres['T2MIN']['unit']="$^\circ$C"
plotres['T2MIN']['convertcoef']=1
plotres['T2MIN']['mask']=True
plotres['T2MIN']['valuemask']=True
plotres['T2MIN']['shift']=-273.15
#plotres['AT2M']['cleve0']=[-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7]
#plotres['AT2M']['cleve1']=range(-10,31,2)
#plotres['AT2M']['cleve3']=range(10)
plotres['AT2M']['cmp1']=plt.get_cmap('jet')
cmp =cmap_BWR
plotres['AT2M']['cmp2']=cmp
plotres['AT2M']['unit']="$^\circ$C"
plotres['AT2M']['convertcoef']=1
plotres['AT2M']['valuemask']=True
plotres['AT2M']['shift']=-273.15
#plotres['AT2M97']['cleve0']=[-10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
#plotres['AT2M97']['cleve1']=range(-15,35,2)
#plotres['AT2M97']['cleve3']=range(10)
plotres['AT2M97']['cmp1']=plt.get_cmap('gist_rainbow_r')
cmp = plt.get_cmap('PuOr_r') #plt.get_cmap('seismic');cmp.set_over('maroon');cmp.set_under('b')
plotres['AT2M97']['cmp2']=cmp
plotres['AT2M97']['unit']="$^\circ$C"
plotres['AT2M97']['convertcoef']=1
plotres['AT2M97']['valuemask']=True
plotres['AT2M97']['shift']=-273.15
#plotres['DTR']['cmp1']=cmap_cs_precp
plotres['DTR']['cmp1']=plt.get_cmap('jet')
cmp =cmap_BWR
plotres['DTR']['cmp2']=cmp
plotres['DTR']['unit']="$^\circ$C"
plotres['DTR']['convertcoef']=1
plotres['DTR']['valuemask']=True
plotres['RH']['cmp1']=plt.get_cmap('viridis_r')
cmp =cmap_BWR
plotres['RH']['cmp2']=cmp
plotres['RH']['unit']="$\%$"
plotres['RH']['convertcoef']=1
plotres['RH']['valuemask']=True
plotres['WIN']['cmp1']=cmap_haxby
cmp =cmap_BWR
plotres['WIN']['cmp2']=cmp
plotres['WIN']['unit']="$m/s$"
plotres['WIN']['convertcoef']=1
plotres['WIN']['valuemask']=True
plotres['GUST']['cmp1']=cmap_haxby
cmp =cmap_BWR
plotres['GUST']['cmp2']=cmp
plotres['GUST']['unit']="$m/s$"
plotres['GUST']['convertcoef']=1
plotres['GUST']['valuemask']=True
|
sunchaoatmo/cplot
|
plotset.py
|
Python
|
gpl-3.0
| 28,891
|
import bench
class Stats(bench.Bench):
def __init__(self, league):
bench.Bench.__init__(self)
self.league = league
self.type = 'stats'
def list(self, team=False, player=False):
"""
        Lists all stats for the current season to date. Can be filtered by team or by player. By default, returns a stat
        dump for the whole league
:param team: Unique ID of the team to filter for
:param player: Unique ID of the player to filter for
:return:
"""
def get_player_stats(self, player, week=False):
"""
Lists the stat breakdown by week for a given player. Can also be filtered to only return a specific week or a
range of weeks
:param player: Unique ID of the player to filter for
:param week: Optional. Can be a single week or a range ex: 1-4. If blank will default to season to date
:return:
"""
|
sroche0/mfl-pyapi
|
modules/stats.py
|
Python
|
gpl-3.0
| 929
|
from django.conf.urls import url
from rpi.beehive.views import AddBeehiveView, delete_readering_view, \
ChartReaderingView, export_view, ListReaderingView, DeleteBeehiveView, \
ModifyBeehiveView, summary_view
urlpatterns = [
url(r'^ajouter$', AddBeehiveView.as_view(), name='add-beehive'),
url(r'^(?P<pk>\d+)/$', summary_view, name='summary'),
url(r'^(?P<pk>\d+)/voir/tableau/$', ListReaderingView.as_view(),
name='table'),
url(r'^(?P<pk>\d+)/voir/graphiques/$', ChartReaderingView.as_view(),
name='charts'),
url(r'^(?P<pk>\d+)/exporter/$', export_view, name='export'),
url(r'^(?P<pk>\d+)/modifier/$', ModifyBeehiveView.as_view(),
name='modify-beehive'),
url(r'^(?P<pk>\d+)/supprimer/$', DeleteBeehiveView.as_view(),
name='delete-beehive'),
url(r'^supprimer-releve/(?P<pk>\d+)/$', delete_readering_view,
name='delete-readering'),
]
|
RuchePI/Site
|
rpi/beehive/urls.py
|
Python
|
gpl-3.0
| 915
|
"""Define related tools for web.archive.org (aka Wayback Machine)."""
import logging
from threading import Thread
from datetime import date
from urllib.parse import urlparse
from regex import compile as regex_compile
from requests import ConnectionError as RequestsConnectionError
from lib.commons import dict_to_sfn_cit_ref
from lib.urls import (
urls_scr, url2dict, get_home_title, get_html, find_authors,
find_journal, find_site_name, find_title, ContentTypeError,
ContentLengthError, StatusCodeError, TITLE_TAG
)
URL_FULLMATCH = regex_compile(
r'https?+://web(?:-beta)?+\.archive\.org/(?:web/)?+'
r'(\d{4})(\d{2})(\d{2})\d{6}(?>cs_|i(?>d_|m_)|js_)?+/(http.*)'
).fullmatch
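# The three numeric groups capture the archive year, month and day from the
# 14-digit Wayback timestamp; the final group captures the original URL.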
def waybackmachine_scr(
archive_url: str, date_format: str = '%Y-%m-%d'
) -> tuple:
"""Create the response namedtuple."""
m = URL_FULLMATCH(archive_url)
if not m:
# Could not parse the archive_url. Treat as an ordinary URL.
return urls_scr(archive_url, date_format)
archive_year, archive_month, archive_day, original_url = \
m.groups()
original_dict = {}
thread = Thread(
target=original_url2dict, args=(original_url, original_dict)
)
thread.start()
try:
archive_dict = url2dict(archive_url)
except (ContentTypeError, ContentLengthError) as e:
logger.exception(archive_url)
# Todo: i18n
return 'Invalid content type or length.', e, ''
archive_dict['date_format'] = date_format
archive_dict['url'] = original_url
archive_dict['archive-url'] = archive_url
archive_dict['archive-date'] = date(
int(archive_year), int(archive_month), int(archive_day)
)
thread.join()
if original_dict:
# The original_process has been successful
if (
original_dict['title'] == archive_dict['title']
or original_dict['html_title'] == archive_dict['html_title']
):
archive_dict.update(original_dict)
archive_dict['url-status'] = 'live'
        else:
            # The original title differs from the archive title, which means
            # the content has probably changed and the original data cannot
            # be trusted.
            archive_dict['url-status'] = 'unfit'
else:
archive_dict['url-status'] = 'dead'
if archive_dict['website'] == 'Wayback Machine':
archive_dict['website'] = (
urlparse(original_url).hostname.replace('www.', '')
)
return dict_to_sfn_cit_ref(archive_dict)
def original_url2dict(ogurl: str, original_dict) -> None:
"""Fill the dictionary with the information found in ogurl."""
# noinspection PyBroadException
try:
original_dict.update(original_url_dict(ogurl))
except (
ContentTypeError,
ContentLengthError,
StatusCodeError,
RequestsConnectionError,
):
pass
except Exception:
logger.exception(
'There was an unexpected error in waybackmechine thread'
)
def original_url_dict(url: str):
"""Retuan dictionary only containing required data for og:url."""
d = {}
# Creating a thread to request homepage title in background
hometitle_list = [] # A mutable variable used to get the thread result
home_title_thread = Thread(
target=get_home_title, args=(url, hometitle_list)
)
home_title_thread.start()
html = get_html(url)
m = TITLE_TAG(html)
html_title = m['result'] if m else None
if html_title:
d['html_title'] = html_title
authors = find_authors(html)
if authors:
d['authors'] = authors
journal = find_journal(html)
if journal:
d['journal'] = journal
d['cite_type'] = 'journal'
else:
d['cite_type'] = 'web'
d['website'] = find_site_name(
html, html_title, url, authors, hometitle_list, home_title_thread
)
d['title'] = find_title(
html, html_title, url, authors, hometitle_list, home_title_thread
)
return d
logger = logging.getLogger(__name__)
|
5j9/yadkard
|
lib/waybackmachine.py
|
Python
|
gpl-3.0
| 4,099
|
#
# Copyright (c) 2016 SUSE Linux GmbH
#
# This file is part of dbxincluder.
#
# dbxincluder is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dbxincluder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dbxincluder. If not, see <http://www.gnu.org/licenses/>.
import glob
import os
import pytest
def pytest_generate_tests(metafunc):
"""Replace the xmltestcases fixture by all *.case.xml files in tests/cases"""
if "xmltestcase" in metafunc.fixturenames:
location = os.path.dirname(os.path.realpath(__file__))
testcases = glob.glob(location + "/cases/*.case.xml")
testcases.sort() # Sort them alphabetically
metafunc.parametrize("xmltestcase", testcases)
|
openSUSE/dbxincluder
|
tests/conftest.py
|
Python
|
gpl-3.0
| 1,148
|
import re
import json
import requests
from util import irc
from util.handler_utils import cmdhook, authenticate, get_target
from qtbot3_common.types.message import Message
def scrape(board: str, filtertext: str):
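    """Yield (thread number, thread dict) for threads in the board's catalog
    whose subject or teaser contains filtertext."""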
try:
data = requests.get("http://boards.4chan.org/{board}/catalog".format(board=board)).text
        match = re.match(".*var catalog = (?P<catalog>\{.*\});.*", data)
        if not match:
            print("Couldn't scrape catalog")
            return
        catalog = json.loads(match.group('catalog'))
for number, thread in catalog['threads'].items():
sub, teaser = thread['sub'], thread['teaser']
if filtertext in sub.lower() or filtertext in teaser.lower():
yield(number, thread)
except Exception as ex:
print("scraping exception:", ex)
@cmdhook('4chan (?P<board>[^\s]+) (?P<filtertext>.+)')
@authenticate
def handle_scrape(message: Message, match, nick: str):
board = match['board']
filtertext = match['filtertext']
print("searching 4chan's {board} board for {filtertext}...".format(**match))
baseurl = "http://boards.4chan.org/{board}/thread/{number}/{semantic_url}"
lines = []
for number, thread in scrape(board, filtertext):
title = (thread['sub'] + ': ' + baseurl).format(number=number, board=board, **thread)
lines.append(title + ' - ' + thread['teaser'])
target = get_target(message, nick)
return [irc.chat_message(target, line) for line in lines[:3]]
|
yukaritan/qtbot3
|
qtbot3_service/plugins/4chan.py
|
Python
|
gpl-3.0
| 1,490
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#==============================================================================#
# #
# Copyright 2011 Carlos Alberto da Costa Filho #
# #
# This file is part of Notorius. #
# #
# Notorius is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# Notorius is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#==============================================================================#
""" Image Label. """
import datetime
from PyQt4 import QtCore, QtGui
from note import Note
from constants import *
from icons import *
class ImageLabel(QtGui.QLabel):
"""
    The ImageLabel class holds the PDF QPixmap to be displayed in the DocumentWidget.
"""
remove_trigger = QtCore.pyqtSignal()
toggle_source_trigger = QtCore.pyqtSignal()
set_clipboard_trigger = QtCore.pyqtSignal(QtCore.QString)
change_scale_trigger = QtCore.pyqtSignal(float)
change_page_trigger = QtCore.pyqtSignal(int)
show_search_trigger = QtCore.pyqtSignal()
hide_search_trigger = QtCore.pyqtSignal()
def __init__(self, parent = None):
super(ImageLabel, self).__init__()
self.parent = parent
self.preamble = PREAMBLE
self.note_pos = QtCore.QPointF()
self.note_icon_pos = QtCore.QPoint()
self.current_uid = 0
self.closest_id = 0
self.notes = {}
self.move = False
self.drag = False
self.overscroll = 0
self.control = False
self.noteImage = QtGui.QImage(':img/note22.png')
self.rubber_band = QtGui.QRubberBand( QtGui.QRubberBand.Rectangle, self)
self.drag_position = QtCore.QPoint()
self.setMouseTracking(True)
self.setAcceptDrops(True)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.connect(self,
QtCore.SIGNAL("customContextMenuRequested(const QPoint &)"),
self.contextMenu)
self.add_menu = QtGui.QMenu()
self.addNoteAction = QtGui.QAction(self)
self.addNoteAction.setText("Add annotation")
self.connect(self.addNoteAction, QtCore.SIGNAL("triggered()"),
self.slot_add_note)
self.add_menu.addAction(self.addNoteAction)
self.change_menu = QtGui.QMenu()
self.editNoteAction = QtGui.QAction(self)
self.editNoteAction.setText("Edit annotation")
self.connect(self.editNoteAction, QtCore.SIGNAL("triggered()"),
self.slot_edit_note)
self.change_menu.addAction(self.editNoteAction)
self.moveNoteAction = QtGui.QAction(self)
self.moveNoteAction.setText("Move annotation")
self.connect(self.moveNoteAction, QtCore.SIGNAL("triggered()"),
self.slot_move_note)
self.change_menu.addAction(self.moveNoteAction)
self.removeNoteAction = QtGui.QAction(self)
self.removeNoteAction.setText("Remove annotation")
self.connect(self.removeNoteAction, QtCore.SIGNAL("triggered()"),
self.slot_remove_note)
self.change_menu.addAction(self.removeNoteAction)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasUrls():
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
files = []
for url in event.mimeData().urls():
files.append(str(url.toLocalFile()))
self.emit(QtCore.SIGNAL("dropped"), files)
else:
event.ignore()
def keyPressEvent(self, event):
if self.parent.Document:
if event.modifiers() == QtCore.Qt.ControlModifier:
if ( event.key() == QtCore.Qt.Key_Plus or
event.key() == QtCore.Qt.Key_Equal):
self.change_scale_trigger.emit(self.parent.scale + 0.25)
elif ( event.key() == QtCore.Qt.Key_Minus and
self.parent.scale > 0.25):
self.change_scale_trigger.emit(self.parent.scale - 0.25)
elif event.key() == QtCore.Qt.Key_0:
self.change_scale_trigger.emit(1.0)
return
if (event.matches(QtGui.QKeySequence.Find) or
event.key() == QtCore.Qt.Key_Slash):
self.show_search_trigger.emit()
elif event.key() == QtCore.Qt.Key_Escape:
self.hide_search_trigger.emit()
elif event.key() == QtCore.Qt.Key_Left:
page = self.parent.page + 1 - 1 + self.parent.offset
self.change_page_trigger.emit(page % self.parent.num_pages)
elif event.key() == QtCore.Qt.Key_Right:
page = self.parent.page + 1 + 1 + self.parent.offset
self.change_page_trigger.emit(page % self.parent.num_pages)
elif event.key() == QtCore.Qt.Key_Down:
bar = self.parent.parent.verticalScrollBar()
bar.setValue(bar.value() + 120)
elif event.key() == QtCore.Qt.Key_Up:
bar = self.parent.parent.verticalScrollBar()
bar.setValue(bar.value() - 120)
def mouseMoveEvent(self, event):
"""
Event handling mouse movement.
"""
if self.parent.Document is None:
return
try:
note = self.notes[self.closest_id]
has_note = True
except KeyError:
has_note = False
width = self.pt2px(self.parent.CurrentPage.pageSizeF())[0]
x_offset = (self.rect().width() - width)/2.0
if has_note and self.drag:
#print 'Drag note %d' %note.uid
note.pos = self.px2pt(event.x() - x_offset, event.y())
self.parent.update_image()
return
#if has_note and self.find_closest(event.x(), event.y()):
#note.update()
#img_path = note.filename.rstrip('tex') + 'border.png'
#QtGui.QToolTip.showText(event.globalPos(),
#'Note %d: <br /> <img src="%s">'
#% (note.uid, img_path), self)
if (event.x() >= x_offset) and (event.x() <= width + x_offset):
try:
x1 = self.drag_position.x()
y1 = self.drag_position.y()
x2 = event.x()
y2 = event.y()
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
#print QtCore.QRect(QtCore.QPoint(x1, y1), QtCore.QPoint(x2, y2))
self.rubber_band.setGeometry(QtCore.QRect(QtCore.QPoint(x1, y1),
QtCore.QPoint(x2, y2)))
except IOError:
print 'IOError in rubberBand.setGeometry try.'
pass
def mousePressEvent(self, event):
if self.parent.Document is None:
return
if event.button() == 1: # Left click
width = self.pt2px(self.parent.CurrentPage.pageSizeF())[0]
x_offset = (self.rect().width() - width)/2.0
if (event.x() >= x_offset) and (event.x() <= width + x_offset):
self.drag_position = QtCore.QPoint(event.pos())
self.rubber_band = QtGui.QRubberBand(
QtGui.QRubberBand.Rectangle, self)
self.rubber_band.setGeometry(QtCore.QRect(
self.drag_position, QtCore.QSize()))
self.rubber_band.show()
if self.find_closest(event.x(), event.y()):
self.drag = True
note = self.notes[self.closest_id]
note.update()
img_path = note.filename.rstrip('tex') + 'border.png'
QtGui.QToolTip.showText(event.globalPos(),
'Note %d: <br /> <img src="%s">'
% (note.uid, img_path), self)
else:
self.drag = False
else:
self.drag = False
else:
self.drag = False
def mouseReleaseEvent(self, event):
if self.parent.Document is None:
return
self.drag = False
width = self.pt2px(self.parent.CurrentPage.pageSizeF())[0]
x_offset = (self.rect().width() - width)/2.0
if self.move:
note = self.notes[self.closest_id]
note.pos = self.px2pt(event.x() - x_offset, event.y())
note.mdate = datetime.datetime.now()
self.parent.update_image()
self.move = False
if not self.rubber_band.size().isEmpty():
x_px = self.rubber_band.x() - x_offset
y_px = self.rubber_band.y()
width_px = self.rubber_band.width()
height_px = self.rubber_band.height()
pos = self.px2pt(x_px, y_px)
x_pt = pos.x()
y_pt = pos.y()
size = self.px2pt(width_px, height_px)
width_pt = size.x()
height_pt = size.y()
rect = QtCore.QRectF(x_pt, y_pt, width_pt, height_pt)
#print rect
text = self.parent.CurrentPage.text(rect)
if text:
self.set_clipboard_trigger.emit(text)
self.rubber_band.hide()
def mouseDoubleClickEvent(self, event):
if self.parent.Document is None:
return
try:
self.notes[self.closest_id]
has_note = True
except KeyError:
has_note = False
width = self.pt2px(self.parent.CurrentPage.pageSizeF())[0]
x_offset = (self.rect().width() - width)/2.0
if has_note and not self.drag:
if (event.x() >= x_offset) and (event.x() <= width + x_offset):
if self.find_closest(event.x(), event.y()):
self.toggle_source_trigger.emit()
def wheelEvent(self, event):
bar = self.parent.parent.verticalScrollBar()
if event.delta() < 0:
limit, add_to_page = bar.maximum(), 1
else:
limit, add_to_page = bar.minimum(), -1
if event.modifiers() == QtCore.Qt.ControlModifier:
if add_to_page == 1 and self.parent.scale > 0.1:
self.change_scale_trigger.emit(self.parent.scale - 0.1)
elif add_to_page == -1:
self.change_scale_trigger.emit(self.parent.scale + 0.1)
else:
super(ImageLabel, self).wheelEvent(event)
if bar.sliderPosition() == limit:
self.overscroll += 1
if self.overscroll > 6:
self.overscroll = 0
page = self.parent.page + 1 + add_to_page + self.parent.offset
self.change_page_trigger.emit(page % self.parent.num_pages)
#if add_to_page < 0:
#print 'previous'
#bar.setValue(2000)
#if add_to_page > 0:
#print 'next'
#bar.setValue(0)
else:
self.overscroll = 0
def contextMenu(self, pos):
"""
Event handling right-click contextMenu
"""
if self.parent.Document is None:
return
#print self.notes.values()
width = self.pt2px(self.parent.CurrentPage.pageSizeF())[0]
x_offset = (self.rect().width() - width)/2.0
if (pos.x() >= x_offset) and (pos.x() <= width + x_offset):
if self.find_closest(pos.x(), pos.y()):
self.change_menu.exec_(self.mapToGlobal(pos))
else:
self.note_pos = self.px2pt(pos.x() - x_offset, pos.y())
self.note_icon_pos = QtCore.QPoint(pos.x() - x_offset, pos.y())
#print 'Note position: ', self.note_pos
#print 'Mouse position', pos
self.add_menu.exec_(self.mapToGlobal(pos))
def find_closest(self, x, y):
"""
Finds closest note to coordinates (x, y).
Returns True if successful, False otherwise.
"""
width = self.pt2px(self.parent.CurrentPage.pageSizeF())[0]
x_offset = (self.rect().width() - width)/2.0
if len(self.notes) != 0:
for note in self.notes.values():
n_x = (note.pos.x() * self.parent.scale * DPI_X/72.0) + 11
n_y = (note.pos.y() * self.parent.scale * DPI_Y/72.0) + 11
dx = abs(x - x_offset - n_x)
dy = abs(y - n_y)
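                # 11 px is half the 22 px note icon (note22.png), so a click
                # anywhere on the icon selects that note.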
if dx <= 11 and dy <= 11:
self.closest_id = note.uid
return True
return False
def slot_add_note(self):
"""
Slot to add a note. Creates new uid, generates note and displays the
icon.
"""
try:
uid = max(self.notes.keys()) + 1
except ValueError:
uid = 0
self.current_uid = uid
self.notes[uid] = Note('New note', self.preamble, COMPILER,
self.parent.page, self.note_pos, uid)
self.parent.update_image()
def slot_edit_note(self):
"""
Slot to edit note. Update the current_uid with the one closest to the
click.
"""
#print "Editing note %d\n" % self.closest_id
self.current_uid = self.closest_id
self.notes[self.current_uid].mdate = datetime.datetime.now()
def slot_move_note(self):
"""
Slot to move note.
"""
self.move = True
def slot_remove_note(self):
"""
Slot to remove note. Update the current_uid with the one closest to the
        click. Also emits the remove_trigger signal.
"""
#print 'Remove note %d' % self.closest_id
self.remove_trigger.emit()
def pt2px(self, qsize):
"""
        Convert from pt to px: 1 pt = 1/72 inch, so px = pt * DPI * scale / 72.
"""
width = qsize.width()
width *= self.parent.scale * DPI_X / 72.0
height = qsize.height()
height *= self.parent.scale * DPI_Y / 72.0
return (width, height)
def px2pt(self, x, y):
"""
        Convert from px to pt: pt = 72 * px / (DPI * scale).
"""
width = 72.0 * x/(DPI_X * self.parent.scale)
height = 72.0 * y/(DPI_Y * self.parent.scale)
return QtCore.QPointF(width, height)
|
cako/notorius
|
src/image_label.py
|
Python
|
gpl-3.0
| 15,994
|
""" Curriculum-based course timetabling solver;
solves timetabling problems formulated in .ectt file format (http://tabu.diegm.uniud.it/ctt/)
Copyright (C) 2013 Stephan E. Becker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
__author__ = 'Stephan Becker'
import math
import data
# hard constraint: Availability
def teacherIsAvailable(event, timeslot):
"""
return True if the teacher of the course is available in the timeslot
"""
if event is None:
return True
for constraint in data.unavailability_constraints:
if event.id == constraint.courseID and timeslot == constraint.timeslot:
return False
return True
# hard constraint: Lectures (part 2 of 2)
def timeslotHasSameLecture(event, timeslot):
"""
checks if a lecture of the same course is already assigned to this timeslot,
returns True if there is already a lecture of the course in this timeslot
"""
if event is None:
return False
for room in range(data.numberOfRooms):
if not data.timetable[(room, timeslot)] is None:
if data.timetable[(room, timeslot)].id == event.id:
return True
return False
def timeslotHasSameTeacher(event, timeslot):
"""
checks if a course with the same teacher is already assigned to this timeslot,
returns True if there is
"""
if event is None:
return False
for room in range(data.numberOfRooms):
currentEv = data.timetable[(room, timeslot)] # is the current course also taught by this teacher?
if not currentEv is None:
if currentEv.id in data.teachers[event.teacher]:
return True
return False
def timeslotHasSameCurriculum(event, timeslot):
"""
checks if a course in the same timeslot is part of the same curriculum
returns True if it is
"""
if event is None:
return False
curriculaOfEvent = data.coursesToCurricula[event.id] # which curricula is this course part of?
for room in range(data.numberOfRooms):
currentEv = data.timetable[(room, timeslot)]
if not currentEv is None:
for cu in curriculaOfEvent: # checks whether the current course is also part of the same curriculum
if currentEv.id in data.curriculaToCourses[cu]:
return True
return False
def assignCourseToPosition(course, position):
"""
assign the course to the position in the timetable
"""
# if data.timetable[position] is None and courseFitsIntoTimeslot(course, position[1]):
data.timetable[position] = course
data.emptyPositions.remove(position)
data.forbiddenPositions.append(position)
def removeCourseAtPosition(position):
"""
remove the course which was assigned at the position from the timetable
and add it to unassigned events
returns the removed course
"""
ev = data.timetable[position]
if not ev is None:
data.timetable[position] = None
data.emptyPositions.append(position)
return ev
def courseFitsIntoTimeslot(course, timeslot):
return not timeslotHasSameLecture(course, timeslot) and teacherIsAvailable(course, timeslot) \
and not timeslotHasSameTeacher(course, timeslot) and not timeslotHasSameCurriculum(course, timeslot)
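# --- Illustrative usage sketch (an assumed helper, not defined elsewhere in this solver) ---
# Shows how the hard-constraint checks above can be combined to place a course,
# assuming data.emptyPositions holds the (room, timeslot) tuples used throughout
# this module.
def placeCourseFirstFit(course):
    """
    assign the course to the first empty position that violates no hard constraint,
    returns the chosen position or None if no feasible position exists
    """
    for position in list(data.emptyPositions):
        timeslot = position[1]
        if courseFitsIntoTimeslot(course, timeslot):
            assignCourseToPosition(course, position)
            return position
    return None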
|
stBecker/CB-CTT_Solver
|
Course timetabling solver/hard.py
|
Python
|
gpl-3.0
| 3,933
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from roars.gui.pyqtutils import PyQtWidget, PyQtImageConverter
from WBaseWidget import WBaseWidget
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4 import QtCore
class WAxesButtons(WBaseWidget):
def __init__(self, name='axes', label='Axes Buttons', changeCallback=None, step=0.001):
super(WAxesButtons, self).__init__(
'ui_axes_buttons'
)
self.name = name
self.label = label
self.step = step
self.ui_label.setText(label)
#⬢⬢⬢⬢⬢➤ Callback
self.changeCallback = changeCallback
self.buttons = {
'x+': self.ui_button_x_plus,
'x-': self.ui_button_x_minus,
'y+': self.ui_button_y_plus,
'y-': self.ui_button_y_minus,
'z+': self.ui_button_z_plus,
'z-': self.ui_button_z_minus
}
self.buttons_name_map = {}
# TODO:example style
self.ui_button_x_minus.setStyleSheet(
"QPushButton:hover{background-color: red}")
for label, button in self.buttons.iteritems():
button.clicked.connect(self.buttonPressed)
self.buttons_name_map[str(button.objectName())] = label
def buttonPressed(self):
if self.changeCallback != None:
label = self.buttons_name_map[str(self.sender().objectName())]
delta = float(label[1] + str(self.step))
val = (self.name, label[0], delta)
self.changeCallback(val)
|
m4nh/roars
|
scripts/roars/gui/widgets/WAxesButtons.py
|
Python
|
gpl-3.0
| 1,548
|
# -*- coding: UTF-8 -*-
#
# Copyright © 2003 - 2018 Michal Čihař <michal@cihar.com>
#
# This file is part of Wammu <https://wammu.eu/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
'''
Wammu - Phone manager
Image displaying classes to be embedded inside wxHTML
'''
import io
import wx
import wx.lib.throbber
import base64
defaultbmp = [
'20 20 2 1',
'. c Black',
' c None',
' ',
' .. .. ',
' ... ... ',
' ... ... ',
' ... ... ',
' ... ... ',
' ... ... ',
' ... ... ',
' ...... ',
' .... ',
' .... ',
' ...... ',
' ... ... ',
' ... ... ',
' ... ... ',
' ... ... ',
' ... ... ',
' ... ... ',
' .. .. ',
' ']
class MemoryInputStream(wx.InputStream):
def __init__(self, data):
        # the decoded image data is binary, so wrap it in a bytes buffer
        wx.InputStream.__init__(self, io.BytesIO(data))
class EncodedBitmap(wx.StaticBitmap):
def __init__(self, parent, tooltip='Image', image=None, size=None, scale=1):
if image is None:
image = defaultbmp
image = wx.ImageFromStream(MemoryInputStream(base64.b64decode(image)))
if scale > 1:
bitmap = wx.BitmapFromImage(image.Scale(image.GetWidth() * scale, image.GetHeight() * scale))
else:
bitmap = wx.BitmapFromImage(image)
wx.StaticBitmap.__init__(self, parent, -1, bitmap, (0, 0))
self.SetToolTipString(tooltip)
class Bitmap(wx.StaticBitmap):
def __init__(self, parent, tooltip='Image', image=None, size=None, scale=1):
if image is None:
image = defaultbmp
bitmap = wx.BitmapFromXPMData(image)
if scale > 1:
img = wx.ImageFromBitmap(bitmap)
bitmap = wx.BitmapFromImage(img.Scale(bitmap.GetWidth() * scale, bitmap.GetHeight() * scale))
wx.StaticBitmap.__init__(self, parent, -1, bitmap, (0, 0))
self.SetToolTipString(tooltip)
class Throbber(wx.lib.throbber.Throbber):
def __init__(self, parent, tooltip='Animation', images=None, size=None, scale=1, delay=0.1):
if images is None:
images = [defaultbmp]
bitmaps = []
for im in images:
bitmap = wx.BitmapFromXPMData(im)
if scale > 1:
img = wx.ImageFromBitmap(bitmap)
bitmap = wx.BitmapFromImage(img.Scale(bitmap.GetWidth() * scale, bitmap.GetHeight() * scale))
bitmaps.append(bitmap)
wx.lib.throbber.Throbber.__init__(self, parent, -1, bitmaps, frameDelay=delay)
self.SetToolTipString(tooltip)
|
gammu/wammu
|
Wammu/Image.py
|
Python
|
gpl-3.0
| 3,359
|
__author__ = 'Liam'
import types
def flag(func):
func.is_flag = True
return func
class BadSearchOp(Exception):
def __init__(self, value = "bad search operation"):
self.value = value
def __str__(self):
return "BadSearchOp: %s" % self.value
class ImapSearchQueryParser(object):
"""
Receives a list of commands for the IMAP V4 search
and returns a dictionary of the commands, that can be used in various mail API's
including walla API for mail
based on RFC3501:
https://tools.ietf.org/html/rfc3501#section-6.4.4
example of commands:
C: A282 SEARCH FLAGGED SINCE 1-Feb-1994 NOT FROM "Smith"
S: * SEARCH 2 84 882
S: A282 OK SEARCH completed
C: A283 SEARCH TEXT "string not in mailbox"
S: * SEARCH
S: A283 OK SEARCH completed
C: A284 SEARCH CHARSET UTF-8 TEXT {6}
C: XXXXXX
S: * SEARCH 43
S: A284 OK SEARCH completed
"""
def __init__(self):
"""
:param query:
:return:
"""
#self.log("{} constructor ".format(self.__class__.__name__))
self.opFunctionList = [x for x,y in self.__class__.__dict__.items() if type(y) == types.FunctionType]
self.query = None
self.commands = {}
self.commands_list = []
#self.__validate()
#########################################################################
#
def __repr__(self):
return self.__class__.__name__+", commands: %s" % self.commands
def log(self,msg):
print msg
#self.logger.log(logging.DEBUG,msg)
def __str__(self):
return str(self.commands)
def _update_command_list(self, command, idx1, idx2=None):
"""
Updates both the command list and commands as to prepare for OR parsing
:param command: a single dictionary object with one key:value (command:argument)
:param idx1: first index
:param idx2: second index
:return:
"""
command_wrapper = {
'data': command,
'pos': [idx1]
}
# update second position
if idx2:
command_wrapper['pos'].append(idx2)
# adding to command list with positions of current command and argument
self.commands_list.append(command_wrapper)
# update the command
self.commands.update(command)
@flag
def OP__ALL(self,currentIndex=None):
self._update_command_list({'all': True}, currentIndex)
@flag
def OP__ANSWERED(self,currentIndex=None):
self._update_command_list({'answered': True}, currentIndex)
def OP__BCC(self,currentIndex=None):
"""
BCC <string>
Messages that contain the specified string in the envelope
structure's BCC field.
:param currentIndex:
:return:
"""
if currentIndex+1 < len(self.query):
#todo check bcc validation
self._update_command_list({'bcc': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "BCC" provided but with no argument in query list')
def OP__BEFORE(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'before': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "BEFORE" provided but with no argument in query list')
def OP__BODY(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'body': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "BODY" provided but with no argument in query list')
def OP__CC(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'cc': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "CC" provided but with no argument in query list')
@flag
def OP__DELETED(self,currentIndex=None):
self._update_command_list({'deleted': True}, currentIndex)
@flag
def OP__DRAFT(self,currentIndex=None):
self._update_command_list({'draft': True}, currentIndex)
@flag
def OP__FLAGGED(self,currentIndex=None):
self._update_command_list({'flagged': True}, currentIndex)
def OP__FROM(self,currentIndex=None):
"""
FROM <string>
Messages that contain the specified string in the envelope
structure's FROM field.
:return:
"""
# assuming that next item is the value, such as: FROM 'man@mayman.com'
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'from': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "FROM" provided but with no argument in query list')
def OP__HEADER(self,currentIndex=None):
# todo work on this one
pass
def OP__KEYWORD(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'keyword': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "KEYWORD" provided but with no argument in query list')
def OP__LARGER(self,currentIndex=None):
argument = self._get_command_argument(currentIndex)
if argument:
self._update_command_list({'larger': argument}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "LARGER" provided but with no argument in query list')
@flag
def OP__NEW(self,currentIndex=None):
self._update_command_list({'new': True}, currentIndex)
@flag
def OP__OLD(self,currentIndex=None):
self._update_command_list({'old': True}, currentIndex)
@flag
def OP__RECENT(self,currentIndex=None):
        self._update_command_list({'recent': True}, currentIndex)
@flag
def OP__SEEN(self,currentIndex=None):
self._update_command_list({'seen': True}, currentIndex)
@flag
def OP__UNANSWERED(self,currentIndex=None):
self._update_command_list({'unanswered': True}, currentIndex)
@flag
    def OP__UNDRAFT(self,currentIndex=None):
self._update_command_list({'undraft': True}, currentIndex)
@flag
def OP__UNFLAGGED(self,currentIndex=None):
self._update_command_list({'unflagged': True}, currentIndex)
@flag
def OP__UNKEYWORD(self,currentIndex=None):
"""
UNKEYWORD <flag>
Messages that do not have the specified keyword flag set.
"""
# todo make it proper somehow
#self.commands.update({'seen': True})
@flag
def OP__UNSEEN(self,currentIndex=None):
self._update_command_list({'unseen': True}, currentIndex)
def OP__SENTBEFORE(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'sentbefore': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "SENTBEFORE" provided but with no argument in query list')
def OP__SENTON(self, currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'senton': self.query[currentIndex+1]}, currentIndex)
else:
raise BadSearchOp('Operator "SENTON" provided but with no argument in query list')
def OP__SENTSINCE(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'sentsince': self.query[currentIndex+1]},currentIndex)
else:
raise BadSearchOp('Operator "SENTSINCE" provided but with no argument in query list')
def OP__SINCE(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'since': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "SINCE" provided but with no argument in query list')
def OP__SMALLER(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'smaller': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "SMALLER" provided but with no argument in query list')
def OP__SUBJECT(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'subject': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "SUBJECT" provided but with no argument in query list')
def OP__TEXT(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'text': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "TEXT" provided but with no argument in query list')
def OP__TO(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'to': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "TO" provided but with no argument in query list')
def OP__UID(self,currentIndex=None):
if currentIndex+1 < len(self.query):
self._update_command_list({'uid': self.query[currentIndex+1]}, currentIndex, currentIndex+1)
else:
raise BadSearchOp('Operator "UID" provided but with no argument in query list')
def _NOT_PARSER(self):
#print "NOT PARSER---"
for i in range(len(self.query)):
operator = self.query[i]
#print "operator:"+operator
if (operator=="NOT"):
#print "found NOT index:{}".format(i)
# find what is next command
if (i+1<len(self.query)):
next_possible_command = self.query[i+1]
#print "next_possible_command:{}".format(next_possible_command)
# is possible command a valid operator function?
possible_command_function = self.__get_op_function(next_possible_command)
# indeed a function
if (callable(possible_command_function)):
is_flag = getattr(possible_command_function,'is_flag',False)
if is_flag:
command = {next_possible_command.lower(): False}
self._update_command_list(command,i)
else:
old_operator_value = self.commands.get(next_possible_command.lower())
for command in self.commands_list:
if command['data'].get(next_possible_command.lower(),None):
del command['data']
command['data'] = {
'not-'+next_possible_command.lower():old_operator_value
}
# add the from position so it will be match when doing OR NOT
command['pos'].append(i)
self.commands['not-'+next_possible_command.lower()] = old_operator_value
del self.commands[next_possible_command.lower()]
def _OR_PARSER(self):
"""
        Parse the OR command and correct/update the commands using the commands_list metadata.
:return:
"""
def _find_command_by_indexes(index1,index2):
#for i in range(len(self.commands_list)):
foundCommands = []
for command in self.commands_list:
pos = command['pos']
#print "command:{}".format(command)
if (index1 in pos):
foundCommands.append(command['data'])
if (index2 in pos):
foundCommands.append(command['data'])
#print "Found OR commands: {}".format(foundCommands)
return foundCommands
for i in range(len(self.query)):
operator = self.query[i]
rhs,lhs = None,None
if operator== "OR":
if (i+1<len(self.query)):
rhs = i+1
if i-1 > -1:
lhs = i-1
# only if both rhs and lhs exist can we go on
if not rhs and not lhs:
raise BadSearchOp('Operator "OR" provided but missing both left hand and right hand side params')
or_commands = _find_command_by_indexes(lhs,rhs)
if len(or_commands)==2:
orDict = {}
for command in or_commands:
#orDict.update(command)
# if command in commands
for k,v in command.iteritems():
#print "K:{} v:{}".format(k,v)
# key of command found
if k in self.commands:
orDict[k] = v
del self.commands[k]
#print "orDict:{}".format(orDict)
self.commands['or'] = orDict
#if command in self.commands
#print "OR RHS:{} LHS:{}".format(rhs, lhs)
def _get_command_argument(self,currentIndex):
"""
will treat the next command as argument to command in currentIndex.
used for all commands that have parameters (arguments),
such as:
FROM <string>
BEFORE <date>
BODY <string> etc...
:param currentIndex:
:return:
"""
# assuming that next item is the value, such as: FROM 'man@mayman.com'
if currentIndex+1 < len(self.query):
#todo check validation
argument = self.query[currentIndex+1]
return argument
else:
return None
@property
def opList(self):
return self.opFunctionList
def __get_op_function(self,operator):
operatorFuncName = "OP__"+operator.upper()
if operatorFuncName in self.opList:
opFunction = getattr(self,operatorFuncName)
return opFunction
else:
return None
def __validate(self):
"""
tries to validate the command set
:return:
"""
print "IMAP4 Search Query List:{}".format(self.query)
if len(self.query) < 1:
raise BadSearchOp("not enough items in list, has to be more then 1 (sequence set,search)")
for i in range(len(self.query)):
operator = self.query[i]
opFunction = self.__get_op_function(operator)
if (opFunction):
#print "operator found:{}".format(operator)
opFunction(i)
else:
pass
#print "operator not found:{}".format(operator)
self._NOT_PARSER()
self._OR_PARSER()
return self.commands
def parse(self, query):
self.query = query
return self.__validate()
if __name__ == "__main__":
test_commands = [
['NOT','FLAGGED','SINCE','1-Feb-1994','NOT','FROM','Smith','BCC', 'aaaa@aaaa.net.il'],
['NOT','BEFORE','1-Feb-1994','NOT','FROM','Smith'],
['SEEN','BEFORE','1-Feb-1994','OR','NOT','FROM','Smith'],
['NOT','SENTBEFORE','1-Feb-1994','NOT','FROM','Smith'],
['SUBJECT','all about love','NOT','TO','aaaa@aaaa.net.il','SINCE','1-Feb-1994','NOT','FROM','Smith','UID','1:*','OR','NOT','TEXT','Go To Hello'],
['SEEN','BEFORE','1-Feb-1994','OR','NOT','FROM','Smith']
]
for command_set in test_commands:
c = ImapSearchQueryParser()
res = c.parse(command_set)
print "Result:{}".format(res)
#print "command_list:{}".format(c.commands_list)
|
syberkitten/Imap4SearchQueryParser
|
SearchParser.py
|
Python
|
gpl-3.0
| 16,261
|
#!/usr/bin/env python3
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__prog_name__ = 'generate_date_table.py'
__prog_desc__ = 'Generate table with LPSN year of priority for species and subspecies names.'
__author__ = 'Pierre Chaumeil'
__copyright__ = 'Copyright 2018'
__credits__ = ['Pierre Chaumeil']
__license__ = 'GPL3'
__version__ = '0.0.2'
__maintainer__ = 'Pierre Chaumeil'
__email__ = 'uqpchaum@uq.edu.au'
__status__ = 'Development'
import os
import sys
import csv
import argparse
import re
import datetime
import logging
from biolib.logger import logger_setup
class DateEditor(object):
"""Main class
"""
def __init__(self):
"""Initialization."""
self.logger = logging.getLogger('timestamp')
def parse_lpsn_scraped_priorities(self, lpsn_scraped_species_info):
"""Parse year of priority from references scraped from LPSN."""
priorities = {}
with open(lpsn_scraped_species_info) as lsi:
lsi.readline()
for line in lsi:
infos = line.rstrip('\n').split('\t')
sp = infos[0]
if sp == 's__':
# *** hack to skip bad case in file
# Pierre to fix
continue
species_authority = infos[2]
reference_str = species_authority.split(', ')[0]
references = reference_str.replace('(', '').replace(')', '')
years = re.sub(r'emend\.[^\d]*\d{4}', '', references)
years = re.sub(r'ex [^\d]*\d{4}', ' ', years)
years = re.findall('[1-3][0-9]{3}', years, re.DOTALL)
years = [int(y) for y in years if int(y) <= datetime.datetime.now().year]
if len(years) == 0:
# assume this name is validated through ICN and just take the first
# date given as the year of priority
years = re.findall('[1-3][0-9]{3}', references, re.DOTALL)
years = [int(y) for y in years if int(y) <= datetime.datetime.now().year]
priorities[sp.replace('s__', '')] = years[0]
# We make sure that species and subspecies type species have the same date
        # i.e. Photorhabdus luminescens and Photorhabdus luminescens subsp.
        # luminescens
for k, v in priorities.items():
infos_name = k.split(' ')
if len(infos_name) == 2 and '{0} {1} subsp. {1}'.format(infos_name[0], infos_name[1]) in priorities:
priorities[k] = min(int(v), int(priorities.get(
'{0} {1} subsp. {1}'.format(infos_name[0], infos_name[1]))))
elif len(infos_name) == 4 and infos_name[1] == infos_name[3] and '{} {}'.format(infos_name[0], infos_name[1]) in priorities:
priorities[k] = min(int(v), int(priorities.get(
'{} {}'.format(infos_name[0], infos_name[1]))))
return priorities
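    # Worked example of the year-extraction logic shared by the two parsers in this
    # class: for a reference such as 'Holospora (ex Hafkine 1890) Gromov and
    # Ossipov 1981' (cf. the comment in parse_lpsn_gss_priorities below), the
    # 'ex ... 1890' part is stripped by the substitutions, so the year of priority
    # extracted is 1981.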
def parse_lpsn_gss_priorities(self, lpsn_gss_file):
"""Get priority of species and usbspecies from LPSN GSS file."""
priorities = {}
illegitimate_names = set()
with open(lpsn_gss_file, encoding='utf-8', errors='ignore') as f:
csv_reader = csv.reader(f)
for line_num, tokens in enumerate(csv_reader):
if line_num == 0:
genus_idx = tokens.index('genus_name')
specific_idx = tokens.index('sp_epithet')
subsp_idx = tokens.index('subsp_epithet')
status_idx = tokens.index('status')
author_idx = tokens.index('authors')
else:
generic = tokens[genus_idx].strip().replace('"', '')
specific = tokens[specific_idx].strip().replace('"', '')
subsp = tokens[subsp_idx].strip().replace('"', '')
if subsp:
taxon = '{} {} subsp. {}'.format(generic, specific, subsp)
elif specific:
taxon = '{} {}'.format(generic, specific)
else:
# skip genus entries
continue
status = tokens[status_idx].strip().replace('"', '')
status_tokens = [t.strip() for t in status.split(';')]
status_tokens = [tt.strip() for t in status_tokens for tt in t.split(',') ]
if 'illegitimate name' in status_tokens:
illegitimate_names.add(taxon)
if taxon in priorities:
continue
# get priority references, ignoring references if they are
                    # marked as being a revised name as indicated by an 'ex' or 'emend'
# (e.g. Holospora (ex Hafkine 1890) Gromov and Ossipov 1981)
ref_str = tokens[author_idx]
references = ref_str.replace('(', '').replace(')', '')
years = re.sub(r'emend\.[^\d]*\d{4}', '', references)
years = re.sub(r'ex [^\d]*\d{4}', ' ', years)
years = re.findall('[1-3][0-9]{3}', years, re.DOTALL)
years = [int(y) for y in years if int(y) <= datetime.datetime.now().year]
if (taxon not in illegitimate_names
and taxon in priorities
and years[0] != priorities[taxon]):
# conflict that can't be attributed to one of the entries being
# considered an illegitimate name
self.logger.error('Conflicting priority references for {}: {} {}'.format(
taxon, years, priorities[taxon]))
priorities[taxon] = years[0]
return priorities
def run(self, lpsn_scraped_species_info, lpsn_gss_file, out_dir):
"""Parse priority year from LPSN data."""
        self.logger.info('Reading priority references scraped from LPSN.')
scraped_sp_priority = self.parse_lpsn_scraped_priorities(lpsn_scraped_species_info)
self.logger.info(' - read priority for {:,} species.'.format(len(scraped_sp_priority)))
self.logger.info('Reading priority references from LPSN GSS file.')
gss_sp_priority = self.parse_lpsn_gss_priorities(lpsn_gss_file)
self.logger.info(' - read priority for {:,} species.'.format(len(gss_sp_priority)))
        self.logger.info('Scraped priority information for {:,} species not in GSS file.'.format(
len(set(scraped_sp_priority) - set(gss_sp_priority))))
self.logger.info('Parsed priority information for {:,} species not on LPSN website.'.format(
len(set(gss_sp_priority) - set(scraped_sp_priority))))
self.logger.info('Writing out year of priority for species giving preference to GSS file.')
output_file = open(os.path.join(out_dir, 'year_table.tsv'), 'w')
same_year = 0
diff_year = 0
for sp in sorted(set(scraped_sp_priority).union(gss_sp_priority)):
if sp in gss_sp_priority:
output_file.write('{}\t{}\n'.format(sp, gss_sp_priority[sp]))
else:
output_file.write('{}\t{}\n'.format(sp, scraped_sp_priority[sp]))
if sp in gss_sp_priority and sp in scraped_sp_priority:
if gss_sp_priority[sp] == scraped_sp_priority[sp]:
same_year += 1
else:
diff_year += 1
self.logger.info(' - same priority year in GSS file and website: {:,}'.format(same_year))
self.logger.info(' - different priority year in GSS file and website: {:,}'.format(diff_year))
output_file.close()
if __name__ == '__main__':
print(__prog_name__ + ' v' + __version__ + ': ' + __prog_desc__)
print(' by ' + __author__ + ' (' + __email__ + ')' + '\n')
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lpsn_scraped_species_info',
help='LPSN species file created by LPSN website parsing.',
required=True)
parser.add_argument('--lpsn_gss_file',
help="table from lpsn.dsmz.de with nomenclature information (lpsn_gss_<date>.csv)",
required=True)
parser.add_argument('--out_dir',
help='Output directory.',
required=True)
args = parser.parse_args()
logger_setup(args.out_dir,
__prog_name__.replace('.py', '.log'),
__prog_name__,
__version__,
False)
try:
dateeditor = DateEditor()
dateeditor.run(args.lpsn_scraped_species_info,
args.lpsn_gss_file,
args.out_dir)
except SystemExit:
print("\nControlled exit resulting from an unrecoverable error or warning.")
raise
except:
print("\nUnexpected error:", sys.exc_info()[0])
raise
|
Ecogenomics/GTDBNCBI
|
scripts_dev/type_genome_selection/generate_date_table.py
|
Python
|
gpl-3.0
| 10,985
|
# -*- encoding: utf-8 -*-
"""Test class for Host Collection CLI"""
import csv
import os
import re
import tempfile
from fauxfactory import gen_string
from itertools import product
from random import sample
from robottelo import ssh
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.contenthost import ContentHost
from robottelo.cli.contentview import ContentView
from robottelo.cli.factory import make_org
from robottelo.cli.hostcollection import HostCollection
from robottelo.cli.import_ import Import
from robottelo.cli.org import Org
from robottelo.cli.repository import Repository
from robottelo.cli.subscription import Subscription
from robottelo.cli.template import Template
from robottelo.cli.user import User
from robottelo.decorators import bz_bug_is_open, skip_if_bug_open
from robottelo.helpers import prepare_import_data
from robottelo.test import CLITestCase
def clean_transdata():
"""Remove transition dataset
"""
ssh.command(u'rm -rf "${HOME}"/.transition_data "${HOME}"/puppet_work_dir')
def build_csv_file(rows=None, dirname=None):
"""Generate a csv file, feed it by the provided data
(a list of dictionary objects) and return a path to it
"""
if rows is None:
rows = [{}]
file_name = tempfile.mkstemp()[1]
with open(file_name, 'wb') as csv_file:
csv_writer = csv.DictWriter(
csv_file, fieldnames=rows[0].keys(), lineterminator='\n'
)
csv_writer.writeheader()
for row in rows:
csv_writer.writerow({
key: val.encode('utf8') for key, val in row.items()
})
if dirname is None:
remote_file = file_name
else:
remote_file = os.path.join(dirname, os.path.basename(file_name))
ssh.upload_file(file_name, remote_file)
os.remove(file_name)
return remote_file
def import_content_hosts(files, tmp_dir):
"""Import all Content Hosts from the Sat5 export csv file including all
the required entities.
:param files: A dictionary of CSV file names and paths
:param tmp_dir: A path to the dataset
:returns: A dictionary of Import objects for every entity
"""
import_org = Import.organization_with_tr_data(
{'csv-file': files['users']}
)
import_repo = Import.repository_with_tr_data({
'csv-file': files['repositories'],
'synchronize': True,
'wait': True,
})
import_cv = Import.content_view_with_tr_data({
u'csv-file': files['content-views'],
u'dir': os.path.join(tmp_dir, 'exports/CHANNELS'),
u'verbose': True
})
# proceed with importing the content hosts
import_chosts = Import.content_host_with_tr_data({
u'csv-file': files['system-profiles'],
u'export-directory': tmp_dir,
u'verbose': True
})
return {
u'organizations': import_org,
u'repositories': import_repo,
u'content_views': import_cv,
u'content_hosts': import_chosts,
}
def update_csv_values(files, new_data, dirname=None):
"""Build CSV file(s) with updated key values provided as an argument
    in order to randomize the dataset while keeping the organization_id
    mappings
:param files: A dictionary with transition files and their paths on
a remote server.
:param new_data: A dictionary containing a file name as a key and a list
of dictionaries representing the individual changes to the CSV.
For example::
{'users': [
{
u'key_id': u'1',
u'organization': u'updated_organization_name_1',
u'username': u'updated_user_name_1',
},
{
u'key_id': u'2',
u'organization': u'updated_organization_name_2',
u'username': u'updated_user_name_2',
}
]}
:param dirname: A string. Target destination for the new CSV files.
:returns: A dictionary with updated CSV file paths.
"""
for file_ in new_data:
updated = False
result = Import.csv_to_dataset([files[file_]])
for change in new_data[file_]:
key = change.get('key')
for record in result:
if record.get(key) == change['key_id']:
record.update(change)
del record['key_id']
del record['key']
updated = True
if updated:
files[file_] = build_csv_file(result, dirname)
return files
def verify_rh_repos(tr_data, channels_file):
"""Verifies that appropriate Products and Content Views have been created
for the enabled Red Hat repository.
:param tr_data: Transition data of the Import command
:param channels_file: Sat5 transition file containing the channels to be
imported/enabled
:returns: A tuple of lists containing info about all related Products and
Content Views
"""
rh_repos = [
repo for repo in Import.csv_to_dataset([channels_file])
if (
repo['channel_name'].startswith('Red Hat') or
repo['channel_name'].startswith('RHN')
)
]
repo_list = []
cv_list = []
for record in product(rh_repos, tr_data):
repo_list.append(
Repository.list({
u'organization-id': record[1]['sat6'],
u'name': Import.repos[record[0]['channel_label']]
})
)
cv_list.append(
ContentView.info({
u'organization-id': record[1]['sat6'],
u'name': record[0]['channel_name']
})['id']
)
return repo_list, cv_list
def get_sat6_id(
entity_dict, transition_dict, tr_key='sat5', ent_key='organization_id'
):
"""Updates the dictionary of the import entity with 'sat6' key/value pairs
    for keeping the Satellite 6 reference to the imported entity
:param entity_dict: A dictionary holding the info for an entity to be
imported (typically a product of csv_to_dataset())
:param transition_dict: A dictionary holding the transition data for the
imported entity (typically a product of Import.*_with_tr_data())
:param tr_key: A string identifying a transition key field to identify
an entity id
:param ent_key: A string identifying entity key field to identify
an entity id
:returns: entity_dict updated by 'sat6' key/value pair
"""
for entity, tr_record in product(entity_dict, transition_dict):
if tr_record[tr_key] == entity[ent_key]:
entity.update({'sat6': tr_record['sat6']})
return entity_dict
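# Illustrative example of get_sat6_id (hypothetical values, not from a real import
# run): with entity_dict = [{'organization_id': '1', 'name': 'Org A'}] and
# transition_dict = [{'sat5': '1', 'sat6': '42'}], the call
# get_sat6_id(entity_dict, transition_dict) returns
# [{'organization_id': '1', 'name': 'Org A', 'sat6': '42'}].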
def gen_import_org_data():
"""Random data for Organization Import tests"""
org_ids = [type(u'')(org_id) for org_id in sample(range(1, 1000), 3)]
return (
{'users': [{
u'key': 'organization_id',
u'key_id': type(u'')(i + 1),
u'organization_id': org_ids[i],
u'organization': gen_string('alphanumeric')
} for i in range(len(org_ids))]},
{'users': [{
u'key': 'organization_id',
u'key_id': type(u'')(i + 1),
u'organization_id': org_ids[i],
u'organization': gen_string('utf8')
} for i in range(len(org_ids))]},
)
def gen_import_org_manifest_data():
"""Random data for Organization Import tests"""
org_ids = [type(u'')(org_id) for org_id in sample(range(1, 1000), 3)]
random_data = (
{'users': [{
u'key': 'organization_id',
u'key_id': type(u'')(i + 1),
u'organization_id': org_ids[i],
u'organization': gen_string('alphanumeric')
} for i in range(len(org_ids))]},
)
if not bz_bug_is_open('1260722'):
random_data = random_data + (
{'users': [{
u'key': 'organization_id',
u'key_id': type(u'')(i + 1),
u'organization_id': org_ids[i],
u'organization': gen_string('utf8')
} for i in range(len(org_ids))]},
)
return random_data
def gen_import_user_data():
"""Random data for User Import tests"""
org_ids = [type(u'')(org_id) for org_id in sample(range(1, 1000), 3)]
return (
{'users': [{
u'key': u'organization_id',
u'key_id': type(u'')(i + 1),
u'organization_id': org_ids[i],
u'organization': gen_string('alphanumeric'),
u'username': gen_string('alphanumeric')
} for i in range(len(org_ids))]},
{'users': [{
u'key': u'organization_id',
u'key_id': type(u'')(i + 1),
u'organization_id': org_ids[i],
u'organization': gen_string('utf8'),
u'username': gen_string('utf8')
} for i in range(len(org_ids))]},
)
def gen_import_hostcol_data():
"""Random data for Organization Import tests"""
org_ids = [type(u'')(org_id) for org_id in sample(range(1, 1000), 3)]
random_data = {'users': [], 'system-groups': []}
for i in range(len(org_ids)):
random_data['users'].append({
u'key': 'organization_id',
u'key_id': type(u'')(i + 1),
u'organization_id': org_ids[i],
u'organization': gen_string('alphanumeric'),
})
random_data['system-groups'].append({
u'key': u'org_id',
u'key_id': type(u'')(i + 1),
u'org_id': org_ids[i],
u'name': gen_string('alphanumeric'),
})
return (random_data,)
def gen_import_repo_data():
"""Random data for Repository Import tests"""
org_ids = [type(u'')(org_id) for org_id in sample(range(1, 1000), 3)]
random_data = {'users': [], 'repositories': []}
for i in range(len(org_ids)):
random_data['users'].append({
u'key': 'organization_id',
u'key_id': type(u'')(i + 1),
u'organization_id': org_ids[i],
u'organization': gen_string('alphanumeric'),
})
random_data['repositories'].append({
u'key': 'org_id',
u'key_id': type(u'')(i + 1),
u'org_id': org_ids[i],
})
return (random_data,)
def gen_import_cv_data():
"""Random data for Content View Import tests"""
return ({
u'users': [{
u'key': 'organization_id',
u'key_id': type(u'')(i + 1),
u'organization': gen_string('alphanumeric')}
for i in range(3)
],
u'content-views': [{
u'key': u'org_id',
u'key_id': type(u'')(i + 1),
u'channel_name': gen_string('alphanumeric'),
u'channel_label': gen_string('alphanumeric')}
for i in range(3)
]},
)
def gen_import_rh_repo_data():
"""Random data for RH Repos Import tests"""
org_ids = [type(u'')(org_id) for org_id in sample(range(1, 1000), 3)]
    # wipe all channel names and labels except channel id 106
return (
{
u'users': [{
u'key': u'organization_id',
u'key_id': type(u'')(i + 1),
u'organization_id': org_ids[i],
u'organization': gen_string('alphanumeric'),
} for i in range(len(org_ids))],
u'channels': [{
u'key': u'channel_id',
u'key_id': type(u'')(i),
u'channel_label': u'',
u'channel_name': gen_string('alphanumeric'),
} for i in set(range(101, 113)) - {106}] + [
{
u'key': u'org_id',
u'key_id': type(u'')(i + 1),
u'org_id': org_ids[i],
} for i in range(len(org_ids))
],
},
)
def gen_import_chost_data():
"""Random data for Content Host Import tests"""
org_ids = [type(u'')(org_id) for org_id in sample(range(1, 1000), 3)]
return (
{
u'users': [{
u'key': u'organization_id',
u'key_id': type(u'')(i + 1),
u'organization': gen_string('alphanumeric'),
} for i in range(len(org_ids))],
u'content-views': [{
u'key': u'org_id',
u'key_id': type(u'')(i + 1),
u'channel_name': gen_string('alphanumeric'),
u'channel_label': gen_string('alphanumeric')}
for i in range(len(org_ids))
],
# wipe all channel labels to make hammer skip the sync
u'channels': [{
u'key': u'channel_id',
u'key_id': type(u'')(i),
u'channel_label': u'',
u'channel_name': gen_string('alphanumeric')}
for i in set(range(101, 113))
],
u'system-profiles': [{
u'key': u'server_id',
u'key_id': type(u'')(1000010000 + i),
u'base_channel_id': u'110',
u'child_channel_id': u'None;111'}
for i in set(range(8, 11))
],
},
)
def gen_import_snippet_data():
"""Random data for Repository Import tests"""
org_ids = [type(u'')(org_id) for org_id in sample(range(1, 1000), 3)]
random_data = {'users': [], 'kickstart-scripts': []}
for i in range(len(org_ids)):
random_data['users'].append({
u'key': 'organization_id',
u'key_id': type(u'')(i + 1),
u'organization_id': org_ids[i],
u'organization': gen_string('alphanumeric'),
})
random_data['kickstart-scripts'].append({
u'key': 'org_id',
u'key_id': type(u'')(i + 1),
u'org_id': org_ids[i],
u'script_name': gen_string('utf8'),
u'kickstart_label': gen_string('utf8'),
u'script_type': sample([u'pre', u'post'], 1).pop(),
u'chroot': sample([u'Y', u'N'], 1).pop(),
})
return (random_data,)
def gen_import_config_files_data():
"""Random data for Config File Import tests"""
org_ids = [type(u'')(org_id) for org_id in sample(range(1, 1000), 3)]
random_data = {'users': [], 'config-files-latest': []}
for i in range(len(org_ids)):
random_data['users'].append({
u'key': 'organization_id',
u'key_id': type(u'')(i + 1),
u'organization_id': org_ids[i],
u'organization': gen_string('alphanumeric'),
})
random_data['config-files-latest'].append({
u'key': 'org_id',
u'key_id': type(u'')(i + 1),
u'org_id': org_ids[i],
})
return (random_data,)
class TestImport(CLITestCase):
"""Import CLI tests.
    All default tests pass no options to the import object.
    In such a case, methods download a default data set from the URL
    specified in robottelo.properties.
"""
@classmethod
def setUpClass(cls):
super(TestImport, cls).setUpClass()
# prepare the default dataset
cls.default_dataset = prepare_import_data()
cls.default_dataset[1]['content-views'] = os.path.join(
cls.default_dataset[0],
'exports/CHANNELS/export.csv',
)
@classmethod
def tearDownClass(cls):
ssh.command(u'rm -r {0}'.format(cls.default_dataset[0]))
super(TestImport, cls).tearDownClass()
def test_import_orgs_default(self):
"""@test: Import all organizations from the default data set
(predefined source).
@feature: Import Organizations
@assert: 3 Organizations are created
"""
for test_data in gen_import_org_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
Import.organization({'csv-file': files['users']})
# now to check whether the orgs from csv appeared in satellite
for org in Import.csv_to_dataset([files['users']]):
Org.info({'name': org['organization']})
clean_transdata()
def test_import_orgs_manifests(self):
"""@test: Import all organizations from the default data set
(predefined source) and upload manifests for each of them
@feature: Import Organizations including Manifests
@assert: 3 Organizations are created with 3 manifests uploaded
"""
for test_data in gen_import_org_manifest_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
Import.organization_with_tr_data_manifests({
'csv-file': files['users'],
})
# now to check whether the orgs from csv appeared in satellite
orgs = set(org['name'] for org in Org.list())
imp_orgs = set(
org['organization'] for
org in Import.csv_to_dataset([files['users']])
)
self.assertTrue(imp_orgs.issubset(orgs))
for org in imp_orgs:
manifest_history = Subscription.manifest_history({
'organization': org,
})[3]
self.assertIn('SUCCESS', manifest_history)
clean_transdata()
def test_reimport_orgs_default_negative(self):
"""@test: Try to Import all organizations from the predefined source
and try to import them again
@feature: Import Organizations twice
@assert: 2nd Import will result in No Action Taken
"""
for test_data in gen_import_org_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
Import.organization({'csv-file': files['users']})
orgs_before = Org.list()
Import.organization({'csv-file': files['users']})
self.assertEqual(orgs_before, Org.list())
clean_transdata()
def test_import_orgs_recovery(self):
"""@test: Try to Import organizations with the same name to invoke
usage of a recovery strategy (rename, map, none)
@feature: Import Organizations Recover
@assert: 2nd Import will result in No Action Taken, 3rd one will rename
the new organizations, and the 4th one will map them
"""
for test_data in gen_import_org_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
# initial import
Import.organization({'csv-file': files['users']})
# clear the .transition_data to clear the transition mapping
ssh.command('rm -rf "${HOME}"/.transition_data')
# use the 'none' strategy
orgs_before = Org.list()
Import.organization({
'csv-file': files['users'], 'recover': 'none'
})
self.assertEqual(orgs_before, Org.list())
# use the default (rename) strategy
imp_rename = Import.organization_with_tr_data({
'csv-file': files['users'],
})
self.assertEqual(
len(imp_rename[1]), len(test_data['users'])
)
for record in imp_rename[1]:
Org.info({'id': record['sat6']})
Import.organization({
'csv-file': files['users'], 'delete': True
})
# use the 'map' strategy
imp_map = Import.organization_with_tr_data({
'csv-file': files['users'], 'recover': 'map',
})
for record in imp_map[1]:
Org.info({'id': record['sat6']})
Import.organization({
'csv-file': files['users'], 'delete': True
})
clean_transdata()
def test_merge_orgs(self):
"""@test: Try to Import all organizations and their users from CSV
to a mapped organization.
@feature: Import User Mapped Org
@assert: 3 Organizations Mapped and their Users created
in a single Organization
"""
for test_data in gen_import_user_data():
with self.subTest(test_data):
# create a new Organization and prepare CSV files
new_org = make_org()
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
pwdfile = os.path.join(tmp_dir, gen_string('alpha', 6))
files = update_csv_values(files, test_data, tmp_dir)
Import.organization({
'csv-file': files['users'],
'into-org-id': new_org['id'],
'verbose': True,
})
Import.user({
'csv-file': files['users'], 'new-passwords': pwdfile
})
# list users by org-id and check whether
# users from csv are in listing
users = User.list({u'organization-id': new_org['id']})
logins = set(user['login'] for user in users)
imp_users = set(
user['username']
for user in Import.csv_to_dataset([files['users']])
)
self.assertTrue(all((user in logins for user in imp_users)))
clean_transdata()
def test_import_users_default(self):
"""@test: Import all 3 users from the default data set (predefined
source).
@feature: Import Users
@assert: 3 Users created
"""
for test_data in gen_import_user_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
pwdfile = os.path.join(tmp_dir, gen_string('alpha', 6))
Import.organization({'csv-file': files['users']})
Import.user({
'csv-file': files['users'], 'new-passwords': pwdfile,
})
# list the users and check whether
# users from csv are in the listing
logins = set(user['login'] for user in User.list())
imp_users = set(
user['username']
for user in Import.csv_to_dataset([files['users']])
)
self.assertTrue(imp_users.issubset(logins))
clean_transdata()
def test_reimport_users_default_negative(self):
"""@test: Try to Import all users from the
predefined source and try to import them again
@feature: Repetitive User Import
@assert: 2nd Import will result in No Action Taken
"""
for test_data in gen_import_user_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
pwdfile = os.path.join(tmp_dir, gen_string('alpha', 6))
# Import the organizations first
Import.organization({'csv-file': files['users']})
Import.user({
'csv-file': files['users'], 'new-passwords': pwdfile,
})
ssh.command(u'rm -rf {0}'.format(pwdfile))
users_before = set(user['login'] for user in User.list())
Import.user({
'csv-file': files['users'], 'new-passwords': pwdfile,
})
users_after = set(user['login'] for user in User.list())
self.assertTrue(users_after.issubset(users_before))
clean_transdata()
def test_import_users_merge(self):
"""@test: Try to Merge users with the same name using 'merge-users'
option.
@feature: Import Users Map-users
@assert: Users imported in 2nd import are being mapped to the existing
ones with the same name
"""
for test_data in gen_import_user_data():
with self.subTest(test_data):
# prepare the data
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
pwdfiles = [
os.path.join(tmp_dir, gen_string('alpha', 6))
for _ in range(2)
]
# initial import
Import.organization({'csv-file': files['users']})
Import.user({
'csv-file': files['users'],
'new-passwords': pwdfiles[0],
})
# clear the .transition_data to clear the transition mapping
ssh.command('rm -rf "${HOME}"/.transition_data/users*')
# import users using merge-users option
import_merge = Import.user_with_tr_data({
'csv-file': files['users'],
'new-passwords': pwdfiles[1],
'merge-users': True,
})
for record in import_merge[1]:
self.assertNotEqual(User.info({'id': record['sat6']}), '')
clean_transdata()
def test_import_users_recovery(self):
"""@test: Try to Import users with the same name to invoke
usage of a recovery strategy (rename, map, none)
@feature: Import Users Recover
@assert: 2nd Import will rename new users, 3rd one will result
in No Action Taken and 4th import will map them
"""
for test_data in gen_import_user_data():
with self.subTest(test_data):
# prepare the data
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
pwdfiles = [
os.path.join(tmp_dir, gen_string('alpha', 6))
for _ in range(4)
]
# initial import
Import.organization({'csv-file': files['users']})
Import.user({
'csv-file': files['users'],
'new-passwords': pwdfiles[0],
})
# clear the .transition_data to clear the transition mapping
ssh.command('rm -rf "${HOME}"/.transition_data/users*')
# use the default (rename) strategy
import_rename = Import.user_with_tr_data({
'csv-file': files['users'],
'new-passwords': pwdfiles[1],
})
for record in import_rename[1]:
User.info({'id': record['sat6']})
Import.user({'csv-file': files['users'], 'delete': True})
# use the 'none' strategy
users_before = set(user['login'] for user in User.list())
Import.user({
'csv-file': files['users'],
'new-passwords': pwdfiles[2],
'recover': 'none',
})
users_after = set(user['login'] for user in User.list())
self.assertEqual(users_before, users_after)
# use the 'map' strategy
import_map = Import.user_with_tr_data({
'csv-file': files['users'],
'recover': 'map',
'new-passwords': pwdfiles[3],
})
for record in import_map[1]:
User.info({'id': record['sat6']})
# do the cleanup
ssh.command(u'rm -rf {0}'.format(' '.join(pwdfiles)))
clean_transdata()
def test_import_host_collections_default(self):
"""@test: Import all System Groups from the default data set
(predefined source) as the Host Collections.
@feature: Import Host-Collections
@assert: 3 Host Collections created
"""
for test_data in gen_import_hostcol_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites
import_org = Import.organization_with_tr_data({
'csv-file': files['users'],
})
Import.host_collection_with_tr_data({
'csv-file': files['system-groups'],
})
# now check whether all HCs from csv are imported
imp_orgs = get_sat6_id(
Import.csv_to_dataset([files['users']]),
import_org[1]
)
for imp_org in imp_orgs:
self.assertNotEqual(
HostCollection.list(
{'organization-id': imp_org['sat6']}
),
[]
)
clean_transdata()
def test_reimport_host_collections_default_negative(self):
"""@test: Try to re-import all System Groups from the default data set
(predefined source) as the Host Collections.
@feature: Repetitive Import Host-Collections
@assert: 3 Host Collections created, no action taken on 2nd Import
"""
for test_data in gen_import_hostcol_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites
import_org = Import.organization_with_tr_data({
'csv-file': files['users'],
})
Import.host_collection({'csv-file': files['system-groups']})
hcollections_before = [
HostCollection.list({'organization-id': tr['sat6']})
for tr in import_org[1]
]
self.assertNotEqual(hcollections_before, [])
Import.host_collection({'csv-file': files['system-groups']})
hcollections_after = [
HostCollection.list({'organization-id': tr['sat6']})
for tr in import_org[1]
]
self.assertEqual(hcollections_before, hcollections_after)
clean_transdata()
def test_import_host_collections_recovery(self):
"""@test: Try to Import Collections with the same name to invoke
usage of a recovery strategy (rename, map, none)
@feature: Import HostCollection Recover
        @assert: 2nd Import will rename the new collections, 3rd import will
result in No Action Taken and the 4th one will map them
"""
for test_data in gen_import_hostcol_data():
with self.subTest(test_data):
# prepare the data
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
# initial import
import_org = Import.organization_with_tr_data({
'csv-file': files['users']
})
Import.host_collection_with_tr_data({
'csv-file': files['system-groups'],
})
# clear the .transition_data to clear the transition mapping
ssh.command(
'rm -rf "${HOME}"/.transition_data/host_collections*'
)
# use the default (rename) strategy
import_hc_rename = Import.host_collection_with_tr_data({
'csv-file': files['system-groups'],
'verbose': True,
})
for record in import_hc_rename[1]:
HostCollection.info({'id': record['sat6']})
Import.host_collection({
'csv-file': files['system-groups'],
'delete': True,
})
# use the 'none' strategy
hc_before = [
HostCollection.list({'organization-id': tr['sat6']})
for tr in import_org[1]
]
Import.host_collection({
'csv-file': files['system-groups'], 'recover': 'none',
})
hc_after = [
HostCollection.list({'organization-id': tr['sat6']})
for tr in import_org[1]
]
self.assertEqual(hc_before, hc_after)
# use the 'map' strategy
import_hc_map = Import.host_collection_with_tr_data({
'csv-file': files['system-groups'],
'recover': 'map',
'verbose': True,
})
for record in import_hc_map[1]:
HostCollection.info({'id': record['sat6']})
clean_transdata()
def test_import_repo_default(self):
"""@test: Import and enable all Repositories from the default data set
(predefined source)
@feature: Import Enable Repositories
@assert: 3 Repositories imported and enabled
"""
for test_data in gen_import_repo_data():
with self.subTest(test_data):
# randomize the values for orgs and repos
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites
import_org = Import.organization_with_tr_data({
'csv-file': files['users'],
})
# now proceed with importing the repositories
Import.repository_with_tr_data({
'csv-file': files['repositories'],
'synchronize': True,
'wait': True,
})
# get the sat6 mapping of the imported organizations
imp_orgs = get_sat6_id(
Import.csv_to_dataset([files['users']]),
import_org[1]
)
# now to check whether all repos from csv appeared in satellite
for imp_org in imp_orgs:
self.assertNotEqual(
Repository.list({'organization-id': imp_org['sat6']}),
[],
)
clean_transdata()
def test_reimport_repo_negative(self):
"""@test: Import and enable all Repositories from the default data set
(predefined source), then try to Import Repositories from the same CSV
again.
@feature: Repetitive Import Enable Repositories
@assert: 3 Repositories imported and enabled, second run should trigger
no action.
"""
for test_data in gen_import_repo_data():
with self.subTest(test_data):
# randomize the values for orgs and repos
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites
import_org = Import.organization_with_tr_data({
'csv-file': files['users'],
})
# now proceed with importing the repositories
Import.repository_with_tr_data({
'csv-file': files['repositories'],
'synchronize': True,
'wait': True,
})
# get the sat6 mapping of the imported organizations
imp_orgs = get_sat6_id(
Import.csv_to_dataset([files['users']]),
import_org[1]
)
repos_before = [
Repository.list({'organization-id': imp_org['sat6']})
for imp_org in imp_orgs
]
# Reimport the same repos and check for changes in sat6
Import.repository({
'csv-file': files['repositories'],
'synchronize': True,
'wait': True,
})
self.assertEqual(
repos_before,
[
Repository.list({'organization-id': imp_org['sat6']})
for imp_org in imp_orgs
]
)
clean_transdata()
def test_import_repo_recovery(self):
"""@test: Try to Import Repos with the same name to invoke
usage of a recovery strategy (rename, map, none)
@feature: Import Repository Recover
@assert: 2nd Import will rename the new repos, 3rd import will
map them and the 4th one will result in No Action Taken
"""
for test_data in gen_import_repo_data():
with self.subTest(test_data):
# prepare the data
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites
import_org = Import.organization_with_tr_data({
'csv-file': files['users'],
})
Import.repository_with_tr_data({
'csv-file': files['repositories'],
})
# clear the .transition_data to clear the transition mapping
ssh.command('rm -rf "${HOME}"/.transition_data/repositories*')
ssh.command('rm -rf "${HOME}"/.transition_data/products*')
# use the default (rename) strategy
import_repo_rename = Import.repository_with_tr_data({
'csv-file': files['repositories'], 'verbose': True,
})
for record in import_repo_rename[1][1]:
Repository.info({'id': record['sat6']})
Import.repository({
'csv-file': files['repositories'], 'delete': True,
})
# use the 'none' strategy
repos_before = [
Repository.list({'organization-id': tr['sat6']})
for tr in import_org[1]
]
Import.repository({
'csv-file': files['repositories'],
'recover': 'none',
})
self.assertEqual(
repos_before,
[Repository.list({'organization-id': tr['sat6']})
for tr in import_org[1]],
)
# use the 'map' strategy
import_repo_map = Import.repository_with_tr_data({
'csv-file': files['repositories'],
'recover': 'map',
'verbose': True,
})
for record in import_repo_map[1][1]:
Repository.info({'id': record['sat6']})
clean_transdata()
def test_import_cv_default(self):
"""@test: Import and enable all Content Views from the default data set
(predefined source)
@feature: Import Enable Content View
@assert: 3 Content Views imported and enabled
"""
for test_data in gen_import_cv_data():
with self.subTest(test_data):
# randomize the values for orgs and repos
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites
import_org = Import.organization_with_tr_data({
'csv-file': files['users'],
})
Import.repository_with_tr_data({
'csv-file': files['repositories'],
'synchronize': True,
'wait': True,
})
# now proceed with Content View import
Import.content_view_with_tr_data({
'csv-file': files['content-views'],
'dir': os.path.join(tmp_dir, 'exports/CHANNELS'),
})
# get the sat6 mapping of the imported organizations
imp_orgs = get_sat6_id(
Import.csv_to_dataset([files['users']]),
import_org[1]
)
# now check whether all CVs from csv are imported
for imp_org in imp_orgs:
self.assertNotEqual(
ContentView.list({'organization-id': imp_org['sat6']}),
[]
)
clean_transdata()
def test_reimport_cv_negative(self):
"""@test: Import and enable all Content Views from the default data set
(predefined source), then try to Import them from the same CSV
again.
@feature: Repetitive Import Content Views
@assert: 3 Content Views imported and enabled, 2nd run should trigger
no action.
"""
for test_data in gen_import_cv_data():
with self.subTest(test_data):
# randomize the values for orgs and repos
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites
import_org = Import.organization_with_tr_data({
'csv-file': files['users'],
})
Import.repository_with_tr_data({
'csv-file': files['repositories'],
'synchronize': True,
'wait': True,
})
Import.content_view_with_tr_data({
'csv-file': files['content-views'],
'dir': os.path.join(tmp_dir, 'exports/CHANNELS'),
})
# get the sat6 mapping of the imported organizations
imp_orgs = get_sat6_id(
Import.csv_to_dataset([files['users']]),
import_org[1]
)
cvs_before = [
ContentView.list({'organization-id': imp_org['sat6']})
for imp_org in imp_orgs
]
# Reimport the same content views and check for changes in sat6
Import.content_view({
'csv-file': files['content-views'],
'dir': os.path.join(tmp_dir, 'exports/CHANNELS'),
})
self.assertEqual(
cvs_before,
[
ContentView.list({'organization-id': imp_org['sat6']})
for imp_org in imp_orgs
]
)
clean_transdata()
def test_import_cv_recovery(self):
"""@test: Try to Import Content Views with the same name to invoke
usage of a recovery strategy (rename, map, none)
@feature: Import Content View Recover
@assert: 2nd Import will rename the new Content Views, 3rd import will
map them and the 4th one will result in No Action Taken
"""
for test_data in gen_import_cv_data():
with self.subTest(test_data):
# prepare the data
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites
import_org = Import.organization_with_tr_data({
'csv-file': files['users'],
})
Import.repository_with_tr_data({
'csv-file': files['repositories'],
})
Import.content_view_with_tr_data({
'csv-file': files['content-views'],
'dir': os.path.join(tmp_dir, 'exports/CHANNELS'),
})
# clear the .transition_data to clear the transition mapping
ssh.command('rm -rf "${HOME}"/.transition_data/repositories*')
ssh.command('rm -rf "${HOME}"/.transition_data/products*')
ssh.command('rm -rf "${HOME}"/.transition_data/content_views*')
# use the default (rename) strategy
import_cv_rename = Import.content_view_with_tr_data({
'csv-file': files['content-views'],
'dir': os.path.join(tmp_dir, 'exports/CHANNELS'),
'verbose': True,
})
for record in import_cv_rename[1]:
ContentView.info({'id': record['sat6']})
Import.content_view({
'csv-file': files['content-views'],
'delete': True,
})
# use the 'none' strategy
cvs_before = [
ContentView.list({'organization-id': tr['sat6']})
for tr in import_org[1]
]
Import.content_view({
'csv-file': files['content-views'],
'dir': os.path.join(tmp_dir, 'exports/CHANNELS'),
'recover': 'none',
})
cvs_after = [
ContentView.list({'organization-id': tr['sat6']})
for tr in import_org[1]
]
self.assertEqual(cvs_before, cvs_after)
# use the 'map' strategy
import_cvs_map = Import.content_view_with_tr_data({
'csv-file': files['content-views'],
'dir': os.path.join(tmp_dir, 'exports/CHANNELS'),
'recover': 'map',
'verbose': True,
})
for record in import_cvs_map[1]:
ContentView.info({'id': record['sat6']})
clean_transdata()
def test_bz1160847_translate_macros(self):
"""@test: Check whether all supported Sat5 macros are being properly
converted to the Puppet facts.
According to RH Transition Guide (Chapter 3.7.8, Table 3.1)
@feature: Import config-file --csv-file --generate-only
@assert: Generated .erb file contains correctly formatted puppet facts
"""
# prepare data (craft csv)
test_data = [
{
u'name': u'hostname',
u'macro': u'{| rhn.system.hostname |}',
u'fact': u'<%= @fqdn %>',
},
{
u'name': u'sys_ip_address',
u'macro': u'{| rhn.system.ip_address |}',
u'fact': u'<%= @ipaddress %>',
},
{
u'name': u'ip_address',
u'macro': u'{| rhn.system.net_interface'
u'.ip_address(eth0) |}',
u'fact': u'<%= @ipaddress_eth0 %>',
},
{
u'name': u'netmask',
u'macro': u'{| rhn.system.net_interface'
u'.netmask(eth0) |}',
u'fact': u'<%= @netmask_eth0 %>',
},
{
u'name': u'mac_address',
u'macro': u'{| rhn.system.net_interface.'
u'hardware_address(eth0) |}',
u'fact': u'<%= @macaddress_eth0 %>',
},
]
csv_contents = u'\n'.join(
u'{0}={1}'.format(i['name'], i['macro']) for i in test_data
)
csv_row = {
u'org_id': u'1',
u'channel_id': u'3',
u'channel': u'config-1',
u'channel_type': u'normal',
u'path': gen_string('utf8') + gen_string('alphanumeric'),
u'file_type': u'file',
u'file_id': u'8',
u'revision': u'1',
u'is_binary': u'N',
u'contents': u'{0}\n'.format(csv_contents),
u'delim_start': u'{|',
u'delim_end': u'|}',
u'username': u'root',
u'groupname': u'root',
u'filemode': u'600',
u'symbolic_link': u'',
u'selinux_ctx': u'',
}
file_name = build_csv_file([csv_row], self.default_dataset[0])
        invalid_chars = r'[^\da-zA-Z\-\.\_]'
# create a random org that will be mapped to sat5 org with id = 1
if bz_bug_is_open(1226981):
org_data = {'name': gen_string('alphanumeric')}
else:
org_data = {'name': gen_string('utf8')}
org = make_org(org_data)
trans_header = [u'sat5', u'sat6', u'delete']
trans_row = [u'1', org['id'], u'']
transition_data_file = tempfile.mkstemp(
prefix='organizations-',
suffix='.csv',
)[1]
with open(transition_data_file, 'wb') as trans_csv:
csv_writer = csv.writer(trans_csv)
csv_writer.writerow(trans_header)
csv_writer.writerow(trans_row)
ssh.command('mkdir -p ~/.transition_data')
ssh.upload_file(
transition_data_file,
os.path.join(
'.transition_data/', os.path.basename(transition_data_file)
)
)
os.remove(transition_data_file)
# run the import command
Import.config_file({
u'csv-file': file_name,
u'generate-only': True,
})
prefix = re.sub(invalid_chars, '', org['name'])
erb_file = re.sub(invalid_chars, '', csv_row['path'])
if len(prefix) == 0:
prefix = u'orgid' + org['id']
if len(erb_file) == 0:
erb_file = u'file_id8'
        # collect the contents of the generated file
cat_cmd = ssh.command(
u'cat "${{HOME}}"/puppet_work_dir/{0}-config_1/templates/'
u'{1}.erb'.format(prefix.lower(), erb_file)
)
        # compare the contents with the expected format
self.assertEqual(
cat_cmd.stdout[:-1],
[fact['name'] + '=' + fact['fact'] for fact in test_data],
)
clean_transdata()
def test_import_enable_rh_repos(self):
"""@test: Import and enable all red hat repositories from predefined
dataset
@feature: Import Enable RH Repositories
@assert: All Repositories imported and synchronized
"""
for test_data in gen_import_rh_repo_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(dict(files), test_data, tmp_dir)
rh_repos = [
repo for repo in Import.csv_to_dataset([files['channels']])
if (
repo['channel_name'].startswith('Red Hat') or
repo['channel_name'].startswith('RHN')
)
]
                # import the prerequisites (organizations with manifests)
import_org = Import.organization_with_tr_data_manifests({
'csv-file': files['users'],
})
Import.repository_enable_with_tr_data({
'csv-file': files['channels'],
'synchronize': True,
'wait': True,
})
                # verify that RH repos appear in every imported org
for record in product(rh_repos, import_org[1]):
self.assertNotEqual(
Repository.list({
u'organization-id': record[1]['sat6'],
u'name': Import.repos[record[0]['channel_label']]
}),
[]
)
self.assertNotEqual(
ContentView.info({
u'organization-id': record[1]['sat6'],
u'name': record[0]['channel_name']
}),
[]
)
clean_transdata()
def test_reimport_enable_rh_repos_negative(self):
"""@test: Repetitive Import and enable of all red hat repositories from
the predefined dataset
@feature: Repetitive Import Enable RH Repositories
@assert: All Repositories imported and synchronized only once
"""
for test_data in gen_import_rh_repo_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites (organizations with manifests)
import_org = Import.organization_with_tr_data_manifests({
'csv-file': files['users'],
})
Import.repository_enable({
'csv-file': files['channels'],
'synchronize': True,
'wait': True,
})
                # verify that RH repos appear in every imported org
repos_before, cvs_before = verify_rh_repos(
import_org[1], files['channels']
)
self.assertFalse([] in repos_before)
self.assertFalse([] in cvs_before)
Import.repository_enable({
'csv-file': files['channels'],
'synchronize': True,
'wait': True,
})
# compare after and before to make sure
# nothing has changed after 2nd import
self.assertEqual(
(repos_before, cvs_before),
verify_rh_repos(
import_org[1], files['channels']
)
)
clean_transdata()
def test_import_content_hosts_default(self):
"""@test: Import all content hosts from
the predefined dataset
@feature: Import Content-host
@assert: Profiles for all Content Hosts created
"""
for test_data in gen_import_chost_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
# import the prerequisites and content hosts
imports = import_content_hosts(files, tmp_dir)
# get the sat6 mapping of the imported organizations
imp_orgs = get_sat6_id(
Import.csv_to_dataset([files['users']]),
imports['organizations'][1]
)
                # now check whether all content hosts appeared in satellite
for imp_org in imp_orgs:
self.assertNotEqual(
ContentHost.list({'organization-id': imp_org['sat6']}),
[]
)
clean_transdata()
def test_reimport_content_hosts_negative(self):
"""@test: Repetitive Import of all content hosts from
the predefined dataset
@feature: Repetitive Import Content-host
@assert: Profiles for all Content Hosts created only once
"""
for test_data in gen_import_chost_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
# import the prerequisites and content hosts
imports = import_content_hosts(files, tmp_dir)
# get the sat6 mapping of the imported organizations
imp_orgs = get_sat6_id(
Import.csv_to_dataset([files['users']]),
imports['organizations'][1]
)
chosts_before = [
ContentHost.list({'organization-id': imp_org['sat6']})
for imp_org in imp_orgs
]
Import.content_host_with_tr_data({
u'csv-file': files['system-profiles'],
u'export-directory': tmp_dir,
u'verbose': True
})
self.assertEqual(
[
ContentHost.list({'organization-id': imp_org['sat6']})
for imp_org in imp_orgs
],
chosts_before
)
clean_transdata()
@skip_if_bug_open('bugzilla', 1267224)
def test_import_content_hosts_recovery_negative(self):
"""@test: Try to invoke usage of a recovery strategy
@feature: Import Content Hosts Recover
@assert: No such option exists, error is shown
"""
for test_data in gen_import_chost_data():
with self.subTest(test_data):
# prepare the data
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
import_content_hosts(files, tmp_dir)
# clear the .transition_data to clear the transition mapping
ssh.command(
'rm -rf "${{HOME}}"/.transition_data/system*'
'{0}/SOURCES {0}/SPECS'
.format(tmp_dir)
)
# use the rename strategy
with self.assertRaises(CLIReturnCodeError):
Import.content_host_with_tr_data({
u'csv-file': files['system-profiles'],
u'export-directory': tmp_dir,
u'recover': u'rename',
})
clean_transdata()
def test_import_snippets_default(self):
"""@test: Import template snippets from the default data set
(predefined source)
@feature: Import Template Snippets
@assert: All Snippets imported
"""
for test_data in gen_import_snippet_data():
with self.subTest(test_data):
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
# randomize the values for orgs and snippets
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites
Import.organization_with_tr_data({'csv-file': files['users']})
# list and save templates before import
before = Template.list()
# now proceed with importing the template snippets
import_snippet = Import.template_snippet_with_tr_data({
'csv-file': files['kickstart-scripts'],
'verbose': True,
})
# list and save templates after import
after = Template.list()
# difference between before and after import
diff = [d for d in after if d not in before]
diff_ids = [d[u'id'] for d in diff]
mapping = import_snippet[1][0]
# check that snippets have been properly imported
for row in mapping:
template = Template.info({u'id': row[u'sat6']})
self.assertTrue(template[u'id'] in diff_ids)
self.assertTrue(template[u'type'] == u'snippet')
clean_transdata()
def test_import_config_files_default(self):
"""@test: Import all Config Files from the default data set
(predefined source)
@feature: Import Config Files
@assert: All Config Files are imported
"""
for test_data in gen_import_config_files_data():
with self.subTest(test_data):
# randomize the values for orgs and repos
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites
Import.organization_with_tr_data(
{'csv-file': files['users']}
)
# now proceed with Config Files import
import_cf = Import.config_file_with_tr_data({
'csv-file': files['config-files-latest'],
'verbose': True,
})
configs = Import.csv_to_dataset([files['config-files-latest']])
imp_configs = get_sat6_id(
configs,
import_cf[1][1],
'channel_id',
'channel_id'
)
for rec in imp_configs:
self.assertEqual(
rec['channel'],
Repository.info({'id': rec['sat6']})['name']
)
clean_transdata()
def test_reimport_config_files_negative(self):
"""@test: Repetitive Import of all Config Files from the default
data set (predefined source)
@feature: Repetitive Import Config Files
@assert: All Config Files are imported only once
"""
for test_data in gen_import_config_files_data():
with self.subTest(test_data):
# randomize the values for orgs and repos
tmp_dir = self.default_dataset[0]
files = dict(self.default_dataset[1])
files = update_csv_values(files, test_data, tmp_dir)
                # import the prerequisites
Import.organization_with_tr_data(
{'csv-file': files['users']}
)
# initial import
import_cf = Import.config_file_with_tr_data({
'csv-file': files['config-files-latest'],
'verbose': True,
})
configs = Import.csv_to_dataset([files['config-files-latest']])
imp_configs = get_sat6_id(
configs,
import_cf[1][1],
'channel_id',
'channel_id'
)
                cf_before = [
                    Repository.info({'id': rec['sat6']})
                    for rec in imp_configs
                ]
                # re-run the same import; the config file repos must not change
                Import.config_file({
                    'csv-file': files['config-files-latest'],
                    'verbose': True,
                })
                cf_after = [
                    Repository.info({'id': rec['sat6']})
                    for rec in imp_configs
                ]
self.assertEqual(cf_before, cf_after)
clean_transdata()
|
abalakh/robottelo
|
tests/foreman/cli/test_import.py
|
Python
|
gpl-3.0
| 64,380
|
# -*- coding: UTF-8 -*-
#
# Copyright © 2003 - 2018 Michal Čihař <michal@cihar.com>
#
# This file is part of Wammu <https://wammu.eu/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
'''
Wammu - Phone manager
Main Wammu application
'''
from __future__ import unicode_literals
from __future__ import print_function
import wx
import sys
import Wammu.Main
import Wammu.Error
from Wammu.Locales import StrConv
from Wammu.Locales import ugettext as _
class WammuApp(wx.App):
'''
    Wammu application class; it initializes wx and creates the main Wammu window.
'''
def OnInit(self):
'''
        wxWindows calls this method to initialize the application.
'''
self.locale = wx.Locale(wx.LANGUAGE_DEFAULT)
self.SetAppName('Wammu')
vendor = StrConv('Michal Čihař')
if vendor.find('?') != -1:
vendor = 'Michal Čihař'
self.SetVendorName(vendor)
frame = Wammu.Main.WammuFrame(None, -1)
Wammu.Error.HANDLER_PARENT = frame
frame.Show(True)
frame.PostInit(self)
self.SetTopWindow(frame)
# Return a success flag
return True
def Run():
'''
Wrapper to execute Wammu. Installs graphical error handler and launches
WammuApp.
'''
try:
sys.excepthook = Wammu.Error.Handler
except:
print(_('Failed to set exception handler.'))
app = WammuApp()
app.MainLoop()
|
gammu/wammu
|
Wammu/App.py
|
Python
|
gpl-3.0
| 2,021
|
""" Convert an OpenTripPlanner json itinerary response into something that's more suitable for rendering via a webpage
"""
import re
import sys
import math
from decimal import *
import datetime
from datetime import timedelta
import simplejson as json
from ott.utils import object_utils
from ott.utils import date_utils
from ott.utils import json_utils
import logging
log = logging.getLogger(__file__)
def remove_agency_from_id(id):
""" OTP 1.0 has TriMet:1 for trip and route ids
"""
ret_val = id
if id and ":" in id:
v = id.split(":")
if v and len(v) > 1 and len(v[1]) > 0:
ret_val = v[1].strip()
return ret_val
class Error(object):
def __init__(self, jsn, params=None):
self.id = jsn['id']
self.msg = jsn['msg']
class DateInfo(object):
def __init__(self, jsn):
# import pdb; pdb.set_trace()
self.start_time_ms = jsn['startTime']
self.end_time_ms = jsn['endTime']
start = datetime.datetime.fromtimestamp(self.start_time_ms / 1000)
end = datetime.datetime.fromtimestamp(self.end_time_ms / 1000)
self.start_date = "{}/{}/{}".format(start.month, start.day, start.year) # 2/29/2012
self.end_date = "{}/{}/{}".format(end.month, end.day, end.year) # 2/29/2012
self.start_time = start.strftime(" %I:%M%p").lower().replace(' 0','') # "3:40pm" -- note, keep pre-space
self.end_time = end.strftime(" %I:%M%p").lower().replace(' 0','') # "3:44pm" -- note, keep pre-space
# service_date is important to link off to proper stop timetables
# in OTP 1.0, we have: <serviceDate>20161123</serviceDate>
# in older versions of OTP, there's no such date so set it to start_date
if 'serviceDate' in jsn and len(jsn['serviceDate']) == 8:
syear = jsn['serviceDate'][0:4]
smonth = jsn['serviceDate'][4:6].lstrip('0')
sday = jsn['serviceDate'][6:].lstrip('0')
self.service_date = "{}/{}/{}".format(smonth, sday, syear) # 2/29/2012
else:
self.service_date = self.estimate_service_date(start)
# OTP 1.0 has seconds not millisecs for duration
durr = int(jsn['duration'])
if durr < 60000:
durr = durr * 1000
self.duration_ms = durr
self.duration = ms_to_minutes(self.duration_ms, is_pretty=True, show_hours=True)
self.date = "%d/%d/%d" % (start.month, start.day, start.year) # 2/29/2012
self.pretty_date = start.strftime("%A, %B %d, %Y").replace(' 0',' ') # "Monday, March 4, 2013"
self.day = start.day
self.month = start.month
self.year = start.year
def estimate_service_date(self, start):
""" in OTP 1.0, we are provided a service_date that's very important to linking to proper schedules, etc...
            but in prior versions, we are missing service_date, so this routine is going to calculate the service date
            this way: if the hour is earlier than 3am, then use 'yesterday' as the service date. This is a hack that
works for agencies like TriMet, which do not have Owl service.
NOTE: there are often instances in parsing OTP 1.0 (non Legs) that also don't have a service_date attribute,
so this routine will also be called. (Service date is mostly used for linking a transit leg
to a stop schedule, so...)
"""
d = start
if start.hour < 3:
""" yesterday calculation for times less than 3am """
d = start - timedelta(days=1)
ret_val = "{}/{}/{}".format(d.month, d.day, d.year) # 2/29/2012
return ret_val
class DateInfoExtended(DateInfo):
"""
"""
def __init__(self, jsn):
super(DateInfoExtended, self).__init__(jsn)
self.extended = True
# step 1: get data
walk = get_element(jsn, 'walkTime', 0)
tran = get_element(jsn, 'transitTime', 0)
wait = get_element(jsn, 'waitingTime', 0)
tot = walk + tran + wait
# step 2: trip length
h,m = seconds_to_hours_minutes(tot)
self.total_time_hours = h
self.total_time_mins = m
self.duration_min = int(round(tot / 60))
# step 3: transit info
h,m = seconds_to_hours_minutes(tran)
self.transit_time_hours = h
self.transit_time_mins = m
self.start_transit = "TODO"
self.end_transit = "TODO"
# step 4: bike / walk length
self.bike_time_hours = None
self.bike_time_mins = None
self.walk_time_hours = None
self.walk_time_mins = None
if 'mode' in jsn and jsn['mode'] == 'BICYCLE':
h,m = seconds_to_hours_minutes(walk)
self.bike_time_hours = h
self.bike_time_mins = m
else:
h,m = seconds_to_hours_minutes(walk)
self.walk_time_hours = h
self.walk_time_mins = m
# step 5: wait time
h,m = seconds_to_hours_minutes(wait)
self.wait_time_hours = h
self.wait_time_mins = m
# step 5: drive time...unused as of now...
self.drive_time_hours = None
self.drive_time_mins = None
self.text = self.get_text()
def get_text(self):
"""
"""
ret_val = ''
tot = hour_min_string(self.total_time_hours, self.total_time_mins)
walk = hour_min_string(self.walk_time_hours, self.walk_time_mins)
bike = hour_min_string(self.bike_time_hours, self.bike_time_mins)
wait = hour_min_string(self.wait_time_hours, self.wait_time_mins)
return ret_val
class Elevation(object):
def __init__(self, steps):
self.points = None
self.points_array = None
self.distance = None
self.start_ft = None
self.end_ft = None
self.high_ft = None
self.low_ft = None
self.rise_ft = None
self.fall_ft = None
self.grade = None
self.distance = self.make_distance(steps)
self.points_array, self.points = self.make_points(steps)
self.grade = self.find_max_grade(steps)
self.set_marks()
@classmethod
def make_distance(cls, steps):
""" loop through distance
"""
ret_val = None
try:
dist = 0
for s in steps:
dist += s['distance']
ret_val = dist
except Exception as ex:
log.warning(ex)
return ret_val
@classmethod
def make_point_string(cls, points, max_len=50):
"""
"""
points_array = points
if len(points) > (max_len * 1.15):
# reduce the point array down to something around the size of max_len (or smaller)
points_array = []
# slice the array up into chunks
# @see http://stackoverflow.com/questions/1335392/iteration-over-list-slices (thank you Nadia)
slice_size = int(round(len(points) / max_len))
if slice_size == 1:
slice_size = 2
list_of_slices = zip(*(iter(points),) * slice_size)
# average up the slices
for s in list_of_slices:
avg = sum(s) / len(s)
points_array.append(avg)
points_string = ','.join(["{0:.2f}".format(p) for p in points_array])
return points_string
@classmethod
def make_points(cls, steps):
""" parse leg for list of elevation points and distances
"""
points_array = None
points_string = None
try:
points = []
for s in steps:
for e in s['elevation']:
elev = e['second']
dist = e['first']
points.append(round(elev, 2))
if len(points) > 0:
points_array = points
points_string = cls.make_point_string(points)
except Exception as e:
log.warning(e)
return points_array, points_string
@classmethod
def find_max_grade(cls, steps):
""" parse leg for list of elevation points and distances
"""
r = {'up': 0, 'down': 0, 'ue': 0, 'ud': 0, 'de': 0, 'dd': 0}
ret_val = r
try:
for s in steps:
first = True
going_up = False
for e in s['elevation']:
dist = e['first']
elev = e['second']
if first:
first = False
r['ue'] = elev
r['ud'] = dist
r['de'] = elev
r['dd'] = dist
else:
# going up
if elev > r['lue']:
# set up vals
r['lue'] = elev
r['lud'] = dist
# set down vals
going_up = True
elif elev < r['lue']:
last_elev = elev
except Exception as e:
log.warning(e)
return ret_val
def set_marks(self):
""" finds start / end / high / low
"""
try:
start = self.points_array[0]
end = self.points_array[len(self.points_array) - 1]
high = self.points_array[0]
low = self.points_array[0]
rise = 0.0
fall = 0.0
slope = 0.0
# find high, low and rise, fall points
last = self.points_array[0]
for p in self.points_array:
if p > high:
high = p
if p < low:
low = p
if p > last:
rise += (p - last)
if p < last:
fall += (p - last)
last = p
# end results as strings with 2 decimal places
self.start_ft = "{0:.1f}".format(start)
self.end_ft = "{0:.1f}".format(end)
self.high_ft = "{0:.1f}".format(high)
self.low_ft = "{0:.1f}".format(low)
# find how much of a rise and fall in feet there are from the avg height
self.rise_ft = "{0:.1f}".format(rise)
self.fall_ft = "{0:.1f}".format(fall)
except Exception as e:
log.warning(e)
class Place(object):
def __init__(self, jsn, name=None):
""" """
self.name = jsn['name']
self.lat = jsn['lat']
self.lon = jsn['lon']
self.stop = Stop.factory(jsn, self.name)
self.map_img = self.make_img_url(lon=self.lon, lat=self.lat, icon=self.endpoint_icon(name))
def endpoint_icon(self, name):
""" """
ret_val = ''
if name:
x='/extraparams/format_options=layout:{0}'
if name in ['to', 'end', 'last']:
ret_val = x.format('end')
elif name in ['from', 'start', 'begin']:
ret_val = x.format('start')
return ret_val
def make_img_url(self, url="//maps.trimet.org/eapi/ws/V1/mapimage/format/png/width/300/height/288/zoom/8/coord/%(lon)s,%(lat)s%(icon)s", **kwargs):
return url % kwargs
def append_url_params(self, route=None, month=None, day=None):
if self.stop:
self.stop.append_params_schedule_url(route, month, day)
self.stop.append_params_info_url(month, day)
@classmethod
def factory(cls, jsn, obj=None, name=None):
""" will create a Place object from json (jsn) data,
optionally assign the resultant object to some other object, as this alleviates the awkward
construct of 'from' that uses a python keyword, (e.g., self.__dict__['from'] = Place(j['from'])
"""
p = Place(jsn, name)
if obj and name:
obj.__dict__[name] = p
return p
class Alert(object):
def __init__(self, jsn, route_id=None):
self.type = 'ROUTE'
self.route_id = route_id
text = url = start_date = None
try:
""" OTP 0.10.x format
"alerts":[
{
"alertDescriptionText":{
"translations":{"":"The westbound stop on NE Dekum at M L King is closed for construction. Use stop at 6th."},
"someTranslation":"The westbound stop on NE Dekum at M L King is closed for construction. Use stop at 6th."},
"alertUrl":{
"translations":{"":"http://trimet.org/alerts/"},
"someTranslation":"http://trimet.org/alerts/"},
},
"effectiveStartDate":1473674400000
}]
"""
text = jsn['alertDescriptionText']['someTranslation']
url = jsn['alertUrl']['someTranslation']
start_date = jsn['effectiveStartDate']
except:
try:
""" OTP 1.0 format
"alerts":[
{
"alertDescriptionText":"The westbound stop on NE Dekum at M L King is closed for construction. Use stop at 6th.",
"effectiveStartDate":1473674400000,
"alertUrl":"http://trimet.org/alerts/"
}
]
"""
text = jsn['alertDescriptionText']
url = jsn['alertUrl']
start_date = jsn['effectiveStartDate']
except:
log.warn("couldn't parse alerts")
self.text = text
self.url = url
# make sure we have a valid start date datetime
try:
dt = datetime.datetime.fromtimestamp(start_date / 1000)
self.start_date = start_date
except Exception as e:
# import pdb; pdb.set_trace()
dt = datetime.datetime.now()
self.start_date = (dt - datetime.datetime(1970, 1, 1)).total_seconds()
        self.start_date_pretty = dt.strftime("%B %d").replace(' 0',' ')  # e.g. "March 4"
self.start_time_pretty = dt.strftime(" %I:%M %p").replace(' 0',' ').lower().strip() # "1:22 pm"
self.long_term = True if datetime.datetime.today() - dt > timedelta(days=35) else False
self.future = True if dt > datetime.datetime.today() else False
# TODO: trimet hack (eliminate me)
if "trimet.org" in self.url:
self.url = "http://trimet.org/#alerts/"
if self.route_id:
self.url = "{0}{1}".format(self.url, self.route_id)
@classmethod
def factory(cls, jsn, route_id=None, def_val=None):
""" returns either def_val (when no alerts in the jsn input), or a list of [Alert]s
"""
ret_val = def_val
try:
if jsn and len(jsn) > 0:
ret_val = []
for a in jsn:
alert = Alert(a, route_id)
ret_val.append(alert)
except Exception as e:
log.warning(e)
return ret_val
class Fare(object):
"""
"""
def __init__(self, jsn, fares):
self.adult = self.get_fare(jsn, '$2.50')
if fares:
self.adult_day = fares.query("adult_day", "$5.00")
self.honored = fares.query("honored", "$1.25")
self.honored_day = fares.query("honored_day", "$2.50")
self.youth = fares.query("youth", "$1.25")
self.youth_day = fares.query("youth_day", "$2.50")
self.tram = fares.query("tram", "$4.70")
self.notes = fares.query("notes")
def get_fare(self, jsn, def_val):
""" TODO -- need to figure out exceptions and populate self.note
1) TRAM (GONDOLA) fare
2) $5.00 one-way fare, when the trip lasts longer than the transfer window
"""
ret_val = def_val
try:
c = int(jsn['fare']['fare']['regular']['cents']) * 0.01
s = jsn['fare']['fare']['regular']['currency']['symbol']
ret_val = "%s%.2f" % (s, c)
except Exception as e:
pass
return ret_val
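    # Sketch of the itinerary JSON shape get_fare() expects (values are hypothetical,
    # not from a live OTP response):
    #   {"fare": {"fare": {"regular": {"cents": 250,
    #                                  "currency": {"symbol": "$"}}}}}
    # would yield "$2.50"; any parse failure falls back to def_val.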
def update_fare_info(self, def_val):
""" read (periodically) a config file containing all fares an agency might present
"""
ret_val = def_val
try:
            # NOTE: 'datetime' is imported as a module here, so the class must be qualified;
            # self.last_update / self.avert_timeout are assumed to be set elsewhere.
            if datetime.datetime.now() - self.last_update > timedelta(minutes=self.avert_timeout):
                log.warning("updating the advert content")
                self.last_update = datetime.datetime.now()
except Exception as e:
log.warning("ERROR updating the advert content {0}".format(e))
return ret_val
class Stop(object):
"""
"""
def __init__(self, jsn, name=None):
# OLD OTP: "stop": {"agencyId":"TriMet", "name":"SW Arthur & 1st", "id":"143","info":"stop.html?stop_id=143", "schedule":"stop_schedule.html?stop_id=143"},
# NEW OTP: "from": { "name":"SE 13th & Lambert","stopId":"TriMet:6693","stopCode":"6693","lon":-122.652906,"lat":45.468484,"arrival":1478551773000,"departure":1478551774000,"zoneId":"B","stopIndex":11,"stopSequence":12,"vertexType":"TRANSIT"}
self.name = name
self.agency = None
self.id = None
self.get_id_and_agency(jsn)
self.info = self.make_info_url(id=self.id)
self.schedule = self.make_schedule_url(id=self.id)
def get_id_and_agency(self, jsn):
try:
# *.10.x format -- "stopId":{"agencyId":"TRIMET","id":"10579"}
self.id = jsn['id']
self.agency = jsn['agencyId']
except Exception as e:
# 1.0.x format -- "stopId":"TriMet:10579",
try:
s = jsn.split(':')
self.id = s[1].strip()
self.agency = s[0].strip()
except Exception as e:
log.warn("couldn't parse AGENCY nor ID from stop")
def make_info_url(self, url="stop.html?stop_id=%(id)s", **kwargs):
return url % kwargs
def make_schedule_url(self, url="stop_schedule.html?stop_id=%(id)s", **kwargs):
return url % kwargs
def append_params_schedule_url(self, route, month, day):
if self.schedule:
if route:
self.schedule += "&route={0}".format(route)
if month and day:
self.schedule += "&month={0}&day={1}".format(month, day)
def append_params_info_url(self, month, day):
if self.info:
if month and day:
self.info += "&month={0}&day={1}".format(month, day)
@classmethod
def factory(cls, jsn, name=None):
ret_val = None
stop_jsn = get_element(jsn, 'stopId')
if stop_jsn:
s = Stop(stop_jsn, name)
ret_val = s
return ret_val
class Route(object):
def __init__(self, jsn):
# TODO IMPORTANT
# TODO We should probably use ott.data's DAO objects here ... very confusing to have multiple routes
# TODO I know I wanted otp_to_ott.py to be standalone, but maybe that's a bad idea in terms of maintenance
# TODO IMPORTANT
# TODO this code is part of view.AgencyTemplate ... use a version of util.AgencyTemplate in the FUTURE
        self.route_id_cleanup = r'\D.*'
self.agency_id = jsn['agencyId']
self.agency_name = get_element(jsn, 'agencyName')
self.id = remove_agency_from_id(jsn['routeId'])
self.name = self.make_name(jsn)
self.headsign = get_element(jsn, 'headsign')
self.trip = remove_agency_from_id(get_element(jsn, 'tripId'))
url = self.url = get_element(jsn, 'url')
if url is None:
url = self.url = get_element(jsn, 'agencyUrl')
self.url = url
self.schedulemap_url = url
# http://www.c-tran.com/routes/2route/map.html
# http://trimet.org/schedules/r008.htm
if self.agency_id.lower() == 'trimet':
self.url = self.make_route_url("http://trimet.org/schedules/r{0}.htm")
self.schedulemap_url = self.make_route_url("http://trimet.org/images/schedulemaps/{0}.gif")
elif self.agency_id.lower() == 'psc':
self.url = self.make_route_url("http://www.portlandstreetcar.org/node/3")
self.schedulemap_url = self.make_route_url("http://www.portlandstreetcar.org/node/4")
elif self.agency_id.lower() == 'c-tran':
self.url = "http://c-tran.com/routes/{0}route/index.html".format(self.id)
self.schedulemap_url = "http://c-tran.com/images/routes/{0}map.png".format(self.id)
# TODO this code is part of view.AgencyTemplate ... use a version of util.AgencyTemplate in the FUTURE
def clean_route_id(self, route_id):
""" cleans the route_id parameter. needed because TriMet started using id.future type route ids for route name changes
"""
ret_val = route_id
if self.route_id_cleanup:
ret_val = re.sub(self.route_id_cleanup, '', route_id)
return ret_val
""" TODO: move to a single class that allows any agency to override & customize """
def make_route_url(self, template):
""" remove trailing x on route id, fill out the id with 3 zeros, pump that id thru the url template
"""
id = self.clean_route_id(self.id)
id = id.zfill(3)
id = template.format(id)
return id
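    # Example of the transformation above, assuming a TriMet-style id (hypothetical):
    #   self.id = "8"   -> clean_route_id -> "8" -> zfill(3) -> "008"
    #   template "http://trimet.org/schedules/r{0}.htm" -> "http://trimet.org/schedules/r008.htm"
    #   an id such as "90x" is first reduced to "90" by clean_route_id().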
def make_name(self, jsn, name_sep='-', def_val=''):
""" create a route name based on the returned json and the long & short names
NOTE: we also handle a special case for interline legs
"""
ret_val = def_val
# step 1: interline name will use jsn['route'] in certain circumstances
        # NOTE: we get some funky things with interline in the recent OTP code, where the record is the previous route,
        # not the new interline route. So we'll build a name like "MAX Yellow Line" from the
        # data we have; fix this properly later (this is temporary).
ln = get_element(jsn, 'routeLongName')
if Leg.is_interline(jsn) and 'route' in jsn and len(jsn['route']) > 0 and not (jsn['route'] in ln or ln in jsn['route']):
ret_val = jsn['route']
else:
# step 2: build up a route name using the short and long name(s) of the route
# step 2a: grab short to go along with long name captured above
sn = get_element(jsn, 'routeShortName')
# step 2b: short name, ala '33' in 33-McLoughlin or '' for MAX Orange Line
if sn and len(sn) > 0:
if len(ret_val) > 0 and name_sep:
ret_val = ret_val + name_sep
ret_val = ret_val + sn
# step 2c: long name name, ala 'McLoughlin' in 33-McLoughlin, 'MAX Orange Line'
if ln and len(ln) > 0:
if len(ret_val) > 0 and name_sep:
ret_val = ret_val + name_sep
ret_val = ret_val + ln
return ret_val
class Step(object):
def __init__(self, jsn):
self.name = jsn['streetName']
self.lat = jsn['lat']
self.lon = jsn['lon']
self.distance_meters = jsn['distance']
self.distance_feet = m_to_ft(jsn['distance'])
self.distance = pretty_distance(self.distance_feet)
self.compass_direction = self.get_direction(get_element(jsn, 'absoluteDirection'))
self.relative_direction = self.get_direction(get_element(jsn, 'relativeDirection'))
@classmethod
def get_direction(cls, dir):
""" TODO localize me
"""
ret_val = dir
try:
ret_val = {
'LEFT': dir.lower(),
'RIGHT': dir.lower(),
'HARD_LEFT': dir.lower().replace('_', ' '),
'HARD_RIGHT': dir.lower().replace('_', ' '),
'CONTINUE': dir.lower(),
'NORTH': dir.lower(),
'SOUTH': dir.lower(),
'EAST': dir.lower(),
'WEST': dir.lower(),
'NORTHEAST': dir.lower(),
'NORTHWEST': dir.lower(),
'SOUTHEAST': dir.lower(),
'SOUTHWEST': dir.lower(),
}[dir]
except Exception as e:
pass
return ret_val
@classmethod
def get_relative_direction(cls, dir):
""" """
ret_val = dir
return ret_val
class Leg(object):
"""
"""
def __init__(self, jsn):
self.mode = jsn['mode']
fm = Place.factory(jsn['from'], self, 'from')
to = Place.factory(jsn['to'], self, 'to')
self.steps = self.get_steps(jsn)
self.elevation = None
if self.steps and 'steps' in jsn:
self.elevation = Elevation(jsn['steps'])
self.date_info = DateInfo(jsn)
self.compass_direction = self.get_compass_direction()
self.distance_meters = jsn['distance']
self.distance_feet = m_to_ft(jsn['distance'])
self.distance = pretty_distance(self.distance_feet)
# transit related attributes
self.route = None
self.alerts = None
self.transfer = None
self.interline = None
# mode specific config
route_id = None
if self.is_transit_mode():
self.route = Route(jsn)
route_id = self.route.id
if 'alerts' in jsn:
self.alerts = Alert.factory(jsn['alerts'], route_id=self.route.id)
self.interline = self.is_interline(jsn)
svc_date = date_utils.parse_month_day_year_string(self.date_info.service_date)
fm.append_url_params(route_id, month=svc_date['month'], day=svc_date['day'])
to.append_url_params(route_id, month=svc_date['month'], day=svc_date['day'])
@classmethod
def is_interline(cls, jsn):
ret_val = False
if 'interlineWithPreviousLeg' in jsn:
ret_val = jsn['interlineWithPreviousLeg']
return ret_val
def is_transit_mode(self):
return self.mode in ['BUS', 'TRAM', 'RAIL', 'TRAIN', 'SUBWAY', 'CABLECAR', 'GONDOLA', 'FUNICULAR', 'FERRY']
def is_sea_mode(self):
return self.mode in ['FERRY']
def is_air_mode(self):
return self.mode in ['GONDOLA']
def is_non_transit_mode(self):
return self.mode in ['BIKE', 'BICYCLE', 'WALK', 'CAR', 'AUTO']
def get_steps(self, jsn):
ret_val = None
if 'steps' in jsn and jsn['steps'] and len(jsn['steps']) > 0:
ret_val = []
for s in jsn['steps']:
step = Step(s)
ret_val.append(step)
return ret_val
def get_compass_direction(self):
ret_val = None
if self.steps and len(self.steps) > 0:
v = self.steps[0].compass_direction
if v:
ret_val = v
return ret_val
class Itinerary(object):
"""
"""
def __init__(self, jsn, itin_num, url, fares):
self.dominant_mode = None
self.selected = False
self.has_alerts = False
self.alerts = []
self.url = url
self.itin_num = itin_num
self.transfers = jsn['transfers']
self.fare = Fare(jsn, fares)
self.date_info = DateInfoExtended(jsn)
self.legs = self.parse_legs(jsn['legs'])
def set_dominant_mode(self, leg):
""" dominant transit leg -- rail > bus
"""
if object_utils.has_content(self.dominant_mode) is False:
self.dominant_mode = object_utils.safe_str(leg.mode).lower()
if leg.is_transit_mode() and not leg.is_sea_mode():
if self.dominant_mode != 'rail' and leg.mode == 'BUS':
self.dominant_mode = 'bus'
else:
self.dominant_mode = 'rail'
def parse_legs(self, legs):
"""
"""
ret_val = []
# step 1: build the legs
for l in legs:
leg = Leg(l)
ret_val.append(leg)
# step 2: find transfer legs e.g., this pattern TRANSIT LEG, WALK/BIKE LEG, TRANSIT LEG
num_legs = len(ret_val)
for i, leg in enumerate(ret_val):
self.set_dominant_mode(leg)
if leg.is_transit_mode() and i+2 < num_legs:
if ret_val[i+2].is_transit_mode() and ret_val[i+1].is_non_transit_mode():
self.transfer = True
# step 3: find 'unique' alerts and build an alerts object for the itinerary
alerts_hash = {}
for leg in ret_val:
if leg.alerts:
self.has_alerts = True
try:
for a in leg.alerts:
alerts_hash[a.text] = a
except Exception as e:
pass
self.alerts = []
for v in alerts_hash.values():
self.alerts.append(v)
return ret_val
class Plan(object):
""" top level class of the ott 'plan' object tree
contains these elements:
self.from, self.to, self.params, self.arrive_by, self.optimize (plus other helpers
"""
def __init__(self, jsn, params=None, fares=None, path="planner.html?itin_num={0}"):
""" creates a self.from and self.to element in the Plan object """
Place.factory(jsn['from'], self, 'from')
Place.factory(jsn['to'], self, 'to')
self.itineraries = self.parse_itineraries(jsn['itineraries'], path, params, fares)
self.set_plan_params(params)
def parse_itineraries(self, itineraries, path, params, fares):
""" TODO explain me...
"""
ret_val = []
for i, jsn in enumerate(itineraries):
itin_num = i+1
url_params = None
if params:
url_params = params.ott_url_params()
url = self.make_itin_url(path, url_params, itin_num)
itin = Itinerary(jsn, itin_num, url, fares)
ret_val.append(itin)
# set the selected
selected = self.get_selected_itinerary(params, len(ret_val))
if selected >= 0 and selected < len(ret_val):
ret_val[selected].selected = True
return ret_val
def make_itin_url(self, path, query_string, itin_num):
"""
"""
ret_val = None
try:
ret_val = path.format(itin_num)
if query_string:
ret_val = "{0}&{1}".format(ret_val, query_string)
except Exception as e:
log.warn("make_itin_url exception")
return ret_val
def get_selected_itinerary(self, params, max=3):
""" return list position (index starts at zero) of the 'selected' itinerary
@see ParamParser
"""
ret_val = 0
if params:
ret_val = params.get_itin_num_as_int()
ret_val -= 1 # decrement value because we need an array index, eg: itin #1 == itin[0]
# final check to make sure we don't over-index the list of itineraries
if ret_val < 0 or ret_val >= max:
ret_val = 0
return ret_val
def pretty_mode(self, mode):
""" TOD0 TODO TODO localize
"""
ret_val = 'Transit'
if 'BICYCLE' in mode and ('TRANSIT' in mode or ('RAIL' in mode and 'BUS' in mode)):
ret_val = 'Bike to Transit'
elif 'BICYCLE' in mode and 'RAIL' in mode:
ret_val = 'Bike to Rail'
elif 'BICYCLE' in mode and 'BUS' in mode:
ret_val = 'Bike to Bus'
elif 'TRANSIT' in mode:
ret_val = 'Transit'
elif 'BUS' in mode:
ret_val = 'Bus'
elif 'RAIL' in mode:
ret_val = 'Rail'
elif 'BICYCLE' in mode:
ret_val = 'Bike'
elif 'WALK' in mode:
ret_val = 'Walk'
return ret_val
def dominant_transit_mode(self, i=0):
""" TODO ... make better...parse itin affect adverts (at least) """
ret_val = 'rail'
if len(self.itineraries) < i:
i = len(self.itineraries) - 1
if i >= 0 and self.itineraries:
ret_val = self.itineraries[i].dominant_mode
return ret_val
def set_plan_params(self, params):
""" passed in by a separate routine, rather than parsed from returned itinerary
"""
if params:
self.params = {
"is_arrive_by" : params.arrive_depart,
"optimize" : params.optimize,
"map_planner" : params.map_url_params(),
"edit_trip" : params.ott_url_params(),
"return_trip" : params.ott_url_params_return_trip(),
"modes" : self.pretty_mode(params.mode),
"walk" : pretty_distance_meters(params.walk_meters)
}
else:
self.params = {}
self.max_walk = "1.4"
"""
UTILITY METHODS
"""
def get_element(jsn, name, def_val=None):
"""
"""
ret_val = def_val
try:
v = jsn[name]
if type(def_val) == int:
ret_val = int(v)
else:
ret_val = v
except Exception as e:
log.debug(name + " not an int value in jsn")
return ret_val
def ms_to_minutes(ms, is_pretty=False, show_hours=False):
ret_val = ms / 1000 / 60
# pretty '3 hours & 1 minute' string
if is_pretty:
h_str = ''
m_str = ''
# calculate hours string
m = ret_val
if show_hours and m > 60:
h = int(math.floor(m / 60))
m = int(m % 60)
if h > 0:
hrs = 'hour' if h == 1 else 'hours'
h_str = '%d %s' % (h, hrs)
if m > 0:
h_str = h_str + ' ' + '&' + ' '
# calculate minutes string
if m > 0:
mins = 'minute' if m == 1 else 'minutes'
m_str = '%d %s' % (m, mins)
ret_val = '%s%s' % (h_str, m_str)
return ret_val
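# Worked example of the pretty path above (values chosen for illustration only):
#   ms_to_minutes(3900000, is_pretty=True, show_hours=True) -> '1 hour & 5 minutes'
#   ms_to_minutes(120000, is_pretty=True)                   -> '2 minutes'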
def hour_min_string(h, m, fmt='{0} {1}', sp=', '):
ret_val = None
if h and h > 0:
hr = 'hours' if h > 1 else 'hour'
ret_val = "{0} {1}".format(h, hr)
if m:
min = 'minutes' if m > 1 else 'minute'
pre = '' if ret_val is None else ret_val + sp
ret_val = "{0}{1} {2}".format(pre, m, min)
return ret_val
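# Illustration (hypothetical values):
#   hour_min_string(1, 30)   -> '1 hour, 30 minutes'
#   hour_min_string(None, 5) -> '5 minutes'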
def seconds_to_hours_minutes(secs, def_val=None, min_secs=60):
"""
"""
min = def_val
hour = def_val
if secs > min_secs:
m = math.floor(secs / 60)
min = m % 60
if m >= 60:
m = m - min
hour = int(math.floor(m / 60))
return hour,min
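# Illustration (hypothetical values):
#   seconds_to_hours_minutes(5400) -> hour 1, minutes 30  (90 minutes total)
#   seconds_to_hours_minutes(30)   -> (None, None), since 30s is below min_secs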
def m_to_ft(m):
ret_val = float(m) * 3.28
return ret_val
def distance_dict(distance, measure):
return {'distance':distance, 'measure':measure}
def pretty_distance(feet):
""" TODO localize
"""
ret_val = ''
if feet <= 1.0:
ret_val = distance_dict(1, 'foot')
elif feet < 1000:
ret_val = distance_dict(int(feet), 'feet')
elif feet < 1500:
ret_val = distance_dict('1/4', 'mile')
elif feet < 2200:
ret_val = distance_dict('1/3', 'mile')
elif feet < 3100:
ret_val = distance_dict('1/2', 'mile')
elif feet < 4800:
ret_val = distance_dict('3/4', 'mile')
elif feet < 5400:
ret_val = distance_dict('1', 'mile')
else:
ret_val = distance_dict(round(feet / 5280, 1), 'miles')
return ret_val
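# Illustration of the bucketing above (hypothetical inputs, in feet):
#   pretty_distance(500)   -> {'distance': 500, 'measure': 'feet'}
#   pretty_distance(2000)  -> {'distance': '1/3', 'measure': 'mile'}
#   pretty_distance(10560) -> {'distance': 2.0, 'measure': 'miles'}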
def pretty_distance_meters(m):
"""
"""
ret_val = m
try:
d = pretty_distance(float(m) * 3.28)
ret_val = "{distance} {measure}".format(**d)
except Exception as e:
log.warn("pretty distance meters")
return ret_val
def main():
argv = sys.argv
if argv and len(argv) > 1 and ('new' in argv or 'n' in argv):
file = './ott/otp_client/tests/data/new/pdx2ohsu.json'
elif argv and len(argv) > 1 and not ('pretty' in argv or 'p' in argv):
file = argv[1]
else:
file = './ott/otp_client/tests/data/old/pdx2ohsu.json'
try:
f = open(file)
except Exception as e:
path = "{0}/{1}".format('ott/otp_client/tests', file)
f = open(path)
j = json.load(f)
p = Plan(j['plan'])
pretty = False
if argv:
pretty = 'pretty' in argv or 'p' in argv
y = json_utils.json_repr(p, pretty)
print(y)
if __name__ == '__main__':
main()
|
OpenTransitTools/otp_client_py
|
ott/otp_client/otp_to_ott.py
|
Python
|
mpl-2.0
| 36,580
|
##
##SMART FP7 - Search engine for MultimediA enviRonment generated contenT
##Webpage: http://smartfp7.eu
##
## This Source Code Form is subject to the terms of the Mozilla Public
## License, v. 2.0. If a copy of the MPL was not distributed with this
## file, You can obtain one at http://mozilla.org/MPL/2.0/.
##
## The Original Code is Copyright (c) 2012-2013 Atos
## All Rights Reserved
##
## Contributor(s):
## Jose Miguel Garrido, jose.garridog at atos dot net
##
"""The third Multimedia Data Manager.
This module stores the metadata from XML files to a SQLite database.
The video generator uses this database to create the actual video clips"""
# This file must work in python >2.7 and >3.3
import sys
p_v = 2 if sys.version_info < (3,) else 3
if p_v == 2:
import urllib, urllib2
import ConfigParser as cp
else:
import urllib.request, urllib.parse, urllib.error
import configparser as cp
import json
import couchdb
import argparse
import logging
import time, datetime
def getConf(filename,section):
dict1 = {}
config = cp.ConfigParser()
config.read(filename)
options = config.options(section)
for option in options:
try:
dict1[option] = config.get(section, option)
except:
print("exception on {}!".format(option))
dict1[option] = None
dict1["wait_time"] = int(dict1["wait_time"])
dict1["couch_server"] = dict1["couch_server"] if (dict1["couch_server"]!="None") else None
return dict1
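# A minimal sketch of the INI section getConf() expects; the key names below come
# from how conf[...] is used in createURL() and the main loop, the values are made up:
#
#   [default]
#   url_base = http://example.org/smart/api
#   id = feeder-01
#   search_type = textual
#   search_for = venue
#   keywords = music festival
#   coord_type = circle
#   coord1_lat = 0.0
#   coord1_long = 0.0
#   coord2_lat = 0.0
#   coord2_long = 0.0
#   radius = 100
#   couch_server = None
#   couch_database = ldm_results
#   wait_time = 0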
def createURL(conf):
query = { "@id": conf["id"] }
if conf["search_type"] == "textual":
command = "txtSearch"
if conf["search_for"] == "venue":
target = "venues"
else:
target = "activities"
if p_v == 2:
url = '{}/{}/{}?label=%22{}%22'.format(conf["url_base"],command,
target,
urllib.quote(conf["keywords"]))
else:
url = '{}/{}/{}?label=%22{}%22'.format(conf["url_base"],command,
target,
urllib.parse.quote(conf["keywords"]))
query.update({ "keywords":conf["keywords"].split(),
"searched_item":conf["search_for"],
"search_type":"textual" })
elif conf["search_type"] == "geo-search":
command = "structuredSearch"
query.update({"search_type":"geo-search"})
if conf["search_for"] == "venue":
query.update({"searched_item":"venues"})
if conf["coord_type"] == "square":
target = "locRec"
query.update({"search_coords":[conf["coord1_long"],conf["coord1_lat"],
conf["coord2_long"],conf["coord2_lat"]]})
else:
target = "locCirc"
query.update({"search_coords":[conf["coord1_long"],conf["coord1_lat"],
conf["radius"]]})
else:
query.update({"searched_item":"activities"})
if conf["coord_type"] == "square":
target = "actRec"
query.update({"search_coords":[conf["coord1_long"],conf["coord1_lat"],
conf["coord2_long"],conf["coord2_lat"]]})
else:
target = "actCirc"
query.update({"search_coords":[conf["coord1_long"],conf["coord1_lat"],
conf["radius"]]})
if target in ("actCirc","locCirc"):
url = '{}/{}/{}?lat1={}&long1={}&radius={}'.format(conf["url_base"],
command,
target,
conf["coord1_lat"],
conf["coord1_long"],
conf["radius"])
else:
url = '{}/{}/{}?lat1={}&long1={}&lat2={}&long2={}'.format(conf["url_base"],
command,target,
conf["coord1_lat"],
conf["coord1_long"],
conf["coord2_lat"],
conf["coord2_long"])
logging.debug(url)
logging.debug(query)
return url, query
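# Example of a URL produced by the textual branch above (hypothetical config values):
#   url_base=http://example.org/smart/api, search_for=venue, keywords="music festival"
#   -> http://example.org/smart/api/txtSearch/venues?label=%22music%20festival%22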
def formatItem(key,doc,time_query,query_info,num):
data = {}
data["time"] = time_query
ldm_result = {}
ldm_result.update(query_info)
ldm_result["key"] = key
if query_info["search_type"] == "textual":
ldm_result["location"] = doc["location"]
else:
ldm_result["location"] = [i["location"] for i in doc["location"]]
ldm_result["location_long"] = [i["long"] for i in doc["location"]]
ldm_result["location_lat"] = [i["lat"] for i in doc["location"]]
if "isPrimaryTopicOf" in doc:
ldm_result["is_primary_topic_of"] = doc["isPrimaryTopicOf"]
if "txt" in doc:
ldm_result["txt"] = doc["txt"]
if "label" in doc:
ldm_result["label"] = doc["label"]
if "date" in doc:
ldm_result["date"] = doc["date"]
if "name" in doc:
ldm_result["name"] = doc["name"]
if "attendance" in doc:
ldm_result["attendance"] = doc["attendance"]
data["ldm_result"] = ldm_result
timestamp = time.time()+(num/1000.0)
time_txt = datetime.datetime.utcfromtimestamp(timestamp).isoformat()+"Z"
item = { "_id":time_txt, "data":data, "timestamp":str(int(timestamp*1000))}
# check for not intended results
remainder = set(doc.keys()) - set(("location", "isPrimaryTopicOf", "txt", "label","date","name","attendance") )
if remainder:
logging.warning("WARNING")
logging.warning(remainder)
logging.debug(item)
return item
def storeItem(db,item):
db.save(item)
if __name__ == '__main__':
    # initialization
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s-> %(message)s')
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--conf_file",type=str,
help="configuration file path")
parser.add_argument("-s", "--section",type=str,
help="section of the configuration to apply")
args = parser.parse_args()
conf_file = args.conf_file if args.conf_file else "ldm_feeder_conf.ini"
section = args.section if args.conf_file else "default"
    while True:  # loop until wait_time == 0 breaks out below
conf = getConf(conf_file,section)
couch = couchdb.Server(conf["couch_server"]) if conf["couch_server"] else couchdb.Server()
db = couch[conf["couch_database"]]
#the program itself
url, query_info = createURL(conf)
if p_v == 2:
response = urllib2.urlopen(url).read()
else:
response = urllib.request.urlopen(url).read()
response = response.decode("utf-8")
response = json.loads(response)
if "locations" in response["data"]:
items = "locations"
elif "activities" in response["data"]:
items = "activities"
for num, i in enumerate(response["data"][items]):
responseItem = formatItem(i,response["data"][items][i],
response["data"]["time"],query_info, num)
storeItem(db, responseItem)
if conf["wait_time"] == 0:
break
else:
time.sleep(conf["wait_time"])
|
SmartSearch/Edge-Node
|
LinkedDataManager/feed_generator/ldm_feeder.py
|
Python
|
mpl-2.0
| 8,016
|
#!/usr/local/bin/python
"""
Handles Google Service Authentication
"""
# TODO(rrayborn): Better documentation
__author__ = "Rob Rayborn"
__copyright__ = "Copyright 2014, The Mozilla Foundation"
__license__ = "MPLv2"
__maintainer__ = "Rob Rayborn"
__email__ = "rrayborn@mozilla.com"
__status__ = "Development"
from OpenSSL.crypto import load_pkcs12, dump_privatekey, FILETYPE_PEM
from datetime import date, datetime, timedelta
from os import environ
import json
import jwt
import requests
import time
_SECRETS_PATH = environ['SECRETS_PATH']
# Header and Grant Type are always the same for Google's API so making a
# variable instead of a file
_HEADER_JSON = {'alg':'RS256','typ':'jwt'}
_GRANT_TYPE = 'urn:ietf:params:oauth:grant-type:jwt-bearer'
# Default filenames
_CLAIMS_FILE = _SECRETS_PATH + 'claims.json'
_P12_FILE = _SECRETS_PATH + 'goog.p12'
_AUTH_FILE = _SECRETS_PATH + '.auth.tmp'
# Other defaults
_GOOG_PASSPHRASE = 'notasecret' # notasecret is the universal google passphrase
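# The claims file above is assumed to hold the standard Google service-account JWT
# claim set; a rough sketch (identifiers are placeholders, not real accounts):
#   {
#     "iss": "my-service-account@my-project.iam.gserviceaccount.com",
#     "scope": "https://www.googleapis.com/auth/analytics.readonly",
#     "aud": "https://accounts.google.com/o/oauth2/token",
#     "iat": 0,
#     "exp": 0
#   }
# _refresh_json_web_token() overwrites "iat"/"exp" before signing with the .p12 key.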
class google_service_connection(object):
def __init__(self, json_web_token=None, expiration=None, claims_file=_CLAIMS_FILE,
p12_file=_P12_FILE, auth_file=_AUTH_FILE):
self._json_web_token = None
self._expiration = None
self._auth_token = None
self._claims_file = claims_file
self._p12_file = p12_file
self._auth_file = auth_file
self.get_auth_token(json_web_token,expiration)
def get_expiration(self):
return self._expiration
def set_files(self, claims_file=None, p12_file=None,
auth_file=None):
self._claims_file = claims_file or self._claims_file
self._p12_file = p12_file or self._p12_file
self._auth_file = auth_file or self._auth_file
def _refresh_json_web_token(self, json_web_token=None, expiration=None,
force=False):
        # skip the refresh only when we already hold an unexpired token
        if not force and self._expiration and not _expired(self._expiration):
            return
if json_web_token or expiration:
if json_web_token and expiration:
if not _expired(expiration):
self._json_web_token = json_web_token
self._expiration = expiration
return
#else continue
else:
raise Exception('_refresh_json_web_token: Must pass json_web_token'\
' and expiration together.')
with open(self._p12_file, 'r') as f:
pk = load_pkcs12(f.read(), _GOOG_PASSPHRASE).get_privatekey()
secret = dump_privatekey(FILETYPE_PEM, pk)
# Load claims json
with open(self._claims_file, 'r') as f:
claims_json = json.load(f)
# Modify claims data
current_time = int(time.time())
claims_json['iat'] = current_time
claims_json['exp'] = current_time + 3600 - 1
# Remember expiration
self._expiration = current_time + 3600
self._json_web_token = jwt.encode(
claims_json, secret, algorithm='RS256', headers=_HEADER_JSON
)
def _load_auth_token(self):
try:
with open(self._auth_file, 'r') as f:
auth_json = json.load(f)
if not _expired(auth_json['expiration']):
self._expiration = auth_json['expiration']
self._auth_token = auth_json['token']
return self._auth_token
else:
return None
except:
return None
def _save_auth_token(self):
with open(self._auth_file, 'w') as f:
data = {'token':self._auth_token, 'expiration':self._expiration}
json.dump(data, f)
def get_auth_token(self, json_web_token=None, expiration=None):
if self._load_auth_token():
return self._auth_token
self._refresh_json_web_token(json_web_token=json_web_token,
expiration=expiration)
parameters = {
'grant_type':_GRANT_TYPE,
'assertion':self._json_web_token
}
response = requests.post('https://accounts.google.com/o/oauth2/token',
data=parameters)
if response.status_code == 200:
self._auth_token = response.json()['access_token']
else:
raise Exception('Token Request results in a %s response code.' \
% response.status_code)
self._save_auth_token()
return self._auth_token
def _expired(exp):
return time.time() >= exp
def main():
gsc = google_service_connection()
if __name__ == '__main__':
main()
|
mozilla/user-advocacy
|
lib/web_api/google_services.py
|
Python
|
mpl-2.0
| 4,807
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
from . import transform
from ..util.yaml import load_yaml
logger = logging.getLogger(__name__)
class TestTask(transform.TransformTask):
"""
A task implementing a Gecko test.
"""
@classmethod
def get_inputs(cls, kind, path, config, params, loaded_tasks):
# the kind on which this one depends
if len(config.get('kind-dependencies', [])) != 1:
raise Exception("TestTask kinds must have exactly one item in kind-dependencies")
dep_kind = config['kind-dependencies'][0]
# get build tasks, keyed by build platform
builds_by_platform = cls.get_builds_by_platform(dep_kind, loaded_tasks)
# get the test platforms for those build tasks
test_platforms_cfg = load_yaml(path, 'test-platforms.yml')
test_platforms = cls.get_test_platforms(test_platforms_cfg, builds_by_platform)
# expand the test sets for each of those platforms
test_sets_cfg = load_yaml(path, 'test-sets.yml')
test_platforms = cls.expand_tests(test_sets_cfg, test_platforms)
# load the test descriptions
test_descriptions = load_yaml(path, 'tests.yml')
# generate all tests for all test platforms
for test_platform_name, test_platform in test_platforms.iteritems():
for test_name in test_platform['test-names']:
test = copy.deepcopy(test_descriptions[test_name])
test['build-platform'] = test_platform['build-platform']
test['test-platform'] = test_platform_name
test['build-label'] = test_platform['build-label']
test['test-name'] = test_name
if test_platform['nightly']:
test.setdefault('attributes', {})['nightly'] = True
logger.debug("Generating tasks for test {} on platform {}".format(
test_name, test['test-platform']))
yield test
@classmethod
def get_builds_by_platform(cls, dep_kind, loaded_tasks):
"""Find the build tasks on which tests will depend, keyed by
platform/type. Returns a dictionary mapping build platform to task."""
builds_by_platform = {}
for task in loaded_tasks:
if task.kind != dep_kind:
continue
build_platform = task.attributes.get('build_platform')
build_type = task.attributes.get('build_type')
if not build_platform or not build_type:
continue
platform = "{}/{}".format(build_platform, build_type)
if platform in builds_by_platform:
raise Exception("multiple build jobs for " + platform)
builds_by_platform[platform] = task
return builds_by_platform
@classmethod
def get_test_platforms(cls, test_platforms_cfg, builds_by_platform):
"""Get the test platforms for which test tasks should be generated,
based on the available build platforms. Returns a dictionary mapping
test platform to {test-set, build-platform, build-label}."""
test_platforms = {}
for test_platform, cfg in test_platforms_cfg.iteritems():
build_platform = cfg['build-platform']
if build_platform not in builds_by_platform:
logger.warning(
"No build task with platform {}; ignoring test platform {}".format(
build_platform, test_platform))
continue
test_platforms[test_platform] = {
'nightly': builds_by_platform[build_platform].attributes.get('nightly', False),
'build-platform': build_platform,
'build-label': builds_by_platform[build_platform].label,
}
test_platforms[test_platform].update(cfg)
return test_platforms
@classmethod
def expand_tests(cls, test_sets_cfg, test_platforms):
"""Expand the test sets in `test_platforms` out to sets of test names.
Returns a dictionary like `get_test_platforms`, with an additional
`test-names` key for each test platform, containing a set of test
names."""
rv = {}
for test_platform, cfg in test_platforms.iteritems():
test_sets = cfg['test-sets']
if not set(test_sets) < set(test_sets_cfg):
raise Exception(
"Test sets {} for test platform {} are not defined".format(
', '.join(test_sets), test_platform))
test_names = set()
for test_set in test_sets:
test_names.update(test_sets_cfg[test_set])
rv[test_platform] = cfg.copy()
rv[test_platform]['test-names'] = test_names
return rv
|
Yukarumya/Yukarum-Redfoxes
|
taskcluster/taskgraph/task/test.py
|
Python
|
mpl-2.0
| 5,066
|
# Copyright (C) 2013-2014 Igor Tkach
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import fcntl
import hashlib
import os
import random
import socket
import tempfile
import time
import traceback
import urllib.parse
from urllib.parse import urlparse
from urllib.parse import urlunparse
from collections import namedtuple
from datetime import datetime, timedelta
from multiprocessing import RLock
from multiprocessing.pool import ThreadPool
from contextlib import contextmanager
import couchdb
import mwclient
import mwclient.page
import pylru
import _thread
def fix_server_url(general_siteinfo):
"""
Get server url from siteinfo's 'general' dict,
add http if scheme is missing. This will also modify
given dictionary.
>>> general_siteinfo = {'server': '//simple.wikipedia.org'}
>>> fix_server_url(general_siteinfo)
'http://simple.wikipedia.org'
>>> general_siteinfo
{'server': 'http://simple.wikipedia.org'}
>>> fix_server_url({'server': 'https://en.wikipedia.org'})
'https://en.wikipedia.org'
>>> fix_server_url({})
''
"""
server = general_siteinfo.get("server", "")
if server:
p = urlparse(server)
if not p.scheme:
server = urlunparse(
urllib.parse.ParseResult(
"http", p.netloc, p.path, p.params, p.query, p.fragment
)
)
general_siteinfo["server"] = server
return server
def update_siteinfo(site, couch_server, db_name):
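    # Fetch the wiki's siteinfo via the API and store it in the shared "siteinfo" database, keyed by db_name.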
try:
siteinfo_db = couch_server.create("siteinfo")
except couchdb.PreconditionFailed:
siteinfo_db = couch_server["siteinfo"]
siteinfo = site.api(
"query",
meta="siteinfo",
siprop="general|interwikimap|rightsinfo|statistics|namespaces",
)["query"]
fix_server_url(siteinfo["general"])
siteinfo.pop("userinfo", None)
siteinfo_doc = siteinfo_db.get(db_name)
if siteinfo_doc:
siteinfo_doc.update(siteinfo)
else:
siteinfo_doc = siteinfo
siteinfo_db[db_name] = siteinfo_doc
def parse_args():
argparser = argparse.ArgumentParser()
argparser.add_argument(
"site",
nargs="?",
help=("MediaWiki site to scrape (host name), " "e.g. en.wikipedia.org"),
)
argparser.add_argument(
"--site-path",
default="/w/",
help=("MediaWiki site API path" "Default: %(default)s"),
)
argparser.add_argument(
"--site-ext",
default=".php",
help=("MediaWiki site API script extension" "Default: %(default)s"),
)
argparser.add_argument(
"-c",
"--couch",
help=("CouchDB server URL. " "Default: %(default)s"),
default="http://localhost:5984",
)
argparser.add_argument(
"--db",
help=(
"CouchDB database name. "
"If not specified, the name will be "
"derived from Mediawiki host name."
),
default=None,
)
argparser.add_argument(
"--titles",
nargs="+",
help=(
"Download article pages with "
"these names (titles). "
"It name starts with @ it is "
"interpreted as name of file containing titles, "
"one per line, utf8 encoded."
),
)
argparser.add_argument(
"--start", help=("Download all article pages " "beginning with this name")
)
argparser.add_argument(
"--changes-since",
help=(
"Download all article pages "
"that change since specified time. "
"Timestamp format is yyyymmddhhmmss. "
"See https://www.mediawiki.org/wiki/Timestamp. "
"Hours, minutes and seconds can be omited"
),
)
argparser.add_argument(
"--recent-days",
type=int,
default=1,
help=("Number of days to look back for recent changes"),
)
argparser.add_argument(
"--recent",
action="store_true",
help=("Download recently changed articles only"),
)
argparser.add_argument(
"--timeout",
default=30.0,
type=float,
help=("Network communications timeout. " "Default: %(default)ss"),
)
argparser.add_argument(
"-S",
"--siteinfo-only",
action="store_true",
help=("Fetch or update siteinfo, then exit"),
)
argparser.add_argument(
"-r",
"--resume",
nargs="?",
default="",
metavar="SESSION ID",
help=(
"Resume previous scrape session. "
"This relies on stats saved in "
"mwscrape database."
),
)
argparser.add_argument(
"--sessions-db-name",
default="mwscrape",
help=(
"Name of database where " "session info is stored. " "Default: %(default)s"
),
)
argparser.add_argument(
"--desc", action="store_true", help=("Request all pages in descending order")
)
argparser.add_argument(
"--delete-not-found",
action="store_true",
help=("Remove non-existing pages from the database"),
)
argparser.add_argument(
"--speed", type=int, choices=range(0, 6), default=0, help=("Scrape speed")
)
argparser.add_argument(
"--delay",
type=float,
default=0,
help=(
"Pause before requesting rendered article "
"for this many seconds. Default: %(default)s."
"Some sites limit request rate so that even "
"single-threaded, request-at-a-time scrapes are too fast"
"and additional delay needs to be introduced"
),
)
argparser.add_argument(
"--namespace",
type=int,
default=0,
help=("ID of MediaWiki namespace to " "scrape. Default: %(default)s"),
)
return argparser.parse_args()
SHOW_FUNC = r"""
function(doc, req)
{
var r = /href="\/wiki\/(.*?)"/gi;
var replace = function(match, p1, offset, string) {
return 'href="' + p1.replace(/_/g, ' ') + '"';
};
return doc.parse.text['*'].replace(r, replace);
}
"""
def set_show_func(db, show_func=SHOW_FUNC, force=False):
design_doc = db.get("_design/w", {})
shows = design_doc.get("shows", {})
if force or not shows.get("html"):
shows["html"] = show_func
design_doc["shows"] = shows
db["_design/w"] = design_doc
Redirect = namedtuple("Redirect", "page fragment")
def redirects_to(site, from_title):
""" Same as mwclient.page.Page.redirects_to except it returns page and fragment
in a named tuple instead of just target page
"""
info = site.api("query", prop="pageprops", titles=from_title, redirects="")["query"]
if "redirects" in info:
for page in info["redirects"]:
if page["from"] == from_title:
return Redirect(
page=mwclient.page.Page(site, page["to"]),
fragment=page.get("tofragment", u""),
)
return None
return None
def scheme_and_host(site_host):
p = urlparse(site_host)
scheme = p.scheme if p.scheme else "https"
host = p.netloc if p.scheme else site_host
return scheme, host
def mkcouch(url):
parsed = urlparse(url)
server_url = parsed.scheme + "://" + parsed.netloc
server = couchdb.Server(server_url)
user = parsed.username
password = parsed.password
if password:
print("Connecting %s as user %s" % (server.resource.url, user))
server.resource.credentials = (user, password)
return server
@contextmanager
def flock(path):
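    # Acquire an exclusive, non-blocking lock on the file to prevent concurrent scrapes of the same host.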
with open(path, "w") as lock_fd:
try:
fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yield
except IOError as ex:
if ex.errno == 11:
print(
"Scrape for this host is already in progress. "
"Use --speed option instead of starting multiple processes."
)
raise SystemExit(1)
finally:
lock_fd.close()
def fmt_mw_tms(dt):
return datetime.strftime(dt, "%Y%m%d%H%M%S")
def main():
args = parse_args()
socket.setdefaulttimeout(args.timeout)
couch_server = mkcouch(args.couch)
sessions_db_name = args.sessions_db_name
try:
sessions_db = couch_server.create(sessions_db_name)
except couchdb.PreconditionFailed:
sessions_db = couch_server[sessions_db_name]
if args.resume or args.resume is None:
session_id = args.resume
if session_id is None:
current_doc = sessions_db["$current"]
session_id = current_doc["session_id"]
print("Resuming session %s" % session_id)
session_doc = sessions_db[session_id]
site_host = session_doc["site"]
scheme, host = scheme_and_host(site_host)
db_name = session_doc["db_name"]
session_doc["resumed_at"] = datetime.utcnow().isoformat()
if args.start:
start_page_name = args.start
else:
start_page_name = session_doc.get("last_page_name", args.start)
if args.desc:
descending = True
else:
descending = session_doc.get("descending", False)
sessions_db[session_id] = session_doc
else:
site_host = args.site
db_name = args.db
start_page_name = args.start
descending = args.desc
if not site_host:
print("Site to scrape is not specified")
raise SystemExit(1)
scheme, host = scheme_and_host(site_host)
if not db_name:
db_name = host.replace(".", "-")
session_id = "-".join(
(db_name, str(int(time.time())), str(int(1000 * random.random())))
)
print("Starting session %s" % session_id)
sessions_db[session_id] = {
"created_at": datetime.utcnow().isoformat(),
"site": site_host,
"db_name": db_name,
"descending": descending,
}
current_doc = sessions_db.get("$current", {})
current_doc["session_id"] = session_id
sessions_db["$current"] = current_doc
site = mwclient.Site(host, path=args.site_path, ext=args.site_ext, scheme=scheme)
update_siteinfo(site, couch_server, db_name)
if args.siteinfo_only:
return
try:
db = couch_server.create(db_name)
except couchdb.PreconditionFailed:
db = couch_server[db_name]
set_show_func(db)
def titles_from_args(titles):
for title in titles:
if title.startswith("@"):
with open(os.path.expanduser(title[1:])) as f:
for line in f:
yield line.strip()
else:
yield title
def recently_changed_pages(timestamp):
changes = site.recentchanges(
start=timestamp,
namespace=0,
toponly=1,
type="edit|new",
dir="newer",
show="!minor|!redirect|!anon|!bot",
)
for page in changes:
title = page.get("title")
if title:
doc = db.get(title)
doc_revid = doc.get("parse", {}).get("revid") if doc else None
revid = page.get("revid")
if doc_revid == revid:
continue
yield title
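    # Build the iterator of pages to scrape, depending on the command-line arguments.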
page_list = mwclient.listing.PageList(site, namespace=args.namespace)
if args.titles:
pages = (page_list[title] for title in titles_from_args(args.titles))
elif args.changes_since or args.recent:
if args.recent:
recent_days = args.recent_days
changes_since = fmt_mw_tms(datetime.utcnow() + timedelta(days=-recent_days))
else:
changes_since = args.changes_since.ljust(14, "0")
print("Getting recent changes (since %s)" % changes_since)
pages = (page_list[title] for title in recently_changed_pages(changes_since))
else:
print("Starting at %s" % start_page_name)
pages = site.allpages(
start=start_page_name,
namespace=args.namespace,
dir="descending" if descending else "ascending",
)
# threads are updating the same session document,
# we don't want to have conflicts
lock = RLock()
def inc_count(count_name):
with lock:
session_doc = sessions_db[session_id]
count = session_doc.get(count_name, 0)
session_doc[count_name] = count + 1
sessions_db[session_id] = session_doc
def update_session(title):
with lock:
session_doc = sessions_db[session_id]
session_doc["last_page_name"] = title
session_doc["updated_at"] = datetime.utcnow().isoformat()
sessions_db[session_id] = session_doc
def process(page):
title = page.name
if not page.exists:
print("Not found: %s" % title)
inc_count("not_found")
if args.delete_not_found:
try:
del db[title]
except couchdb.ResourceNotFound:
print("%s was not in the database" % title)
except couchdb.ResourceConflict:
print("Conflict while deleting %s" % title)
except Exception:
traceback.print_exc()
else:
print("%s removed from the database" % title)
return
try:
aliases = set()
redirect_count = 0
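            # Follow redirect chains (up to 10 hops), collecting source titles as aliases of the target page.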
while page.redirect:
redirect_count += 1
redirect_target = redirects_to(site, page.name)
frag = redirect_target.fragment
if frag:
alias = (title, frag)
else:
alias = title
aliases.add(alias)
page = redirect_target.page
print("%s ==> %s" % (title, page.name + (("#" + frag) if frag else "")))
if redirect_count >= 10:
print("Too many redirect levels: %r" % aliases)
break
title = page.name
if page.redirect:
print("Failed to resolve redirect %s", title)
inc_count("failed_redirect")
return
doc = db.get(title)
if doc:
current_aliases = set()
for alias in doc.get("aliases", ()):
if isinstance(alias, list):
alias = tuple(alias)
current_aliases.add(alias)
if not aliases.issubset(current_aliases):
merged_aliases = aliases | current_aliases
# remove aliases without fragment if one with fragment is present
# this is mostly to cleanup aliases in old scrapes
to_remove = set()
for alias in merged_aliases:
if isinstance(alias, tuple):
to_remove.add(alias[0])
merged_aliases = merged_aliases - to_remove
doc["aliases"] = list(merged_aliases)
db[title] = doc
revid = doc.get("parse", {}).get("revid")
if page.revision == revid:
print("%s is up to date (rev. %s), skipping" % (title, revid))
inc_count("up_to_date")
return
inc_count("updated")
print(
"[%s] rev. %s => %s %s"
% (
time.strftime("%x %X", (page.touched)) if page.touched else "?",
revid,
page.revision,
title,
)
)
if args.delay:
time.sleep(args.delay)
parse = site.api("parse", page=title)
except KeyboardInterrupt as kbd:
print("Caught KeyboardInterrupt", kbd)
_thread.interrupt_main()
except couchdb.ResourceConflict:
print("Update conflict, skipping: %s" % title)
return
except Exception:
print("Failed to process %s:" % title)
traceback.print_exc()
inc_count("error")
return
if doc:
doc.update(parse)
else:
inc_count("new")
doc = parse
if aliases:
doc["aliases"] = list(aliases)
try:
db[title] = doc
except couchdb.ResourceConflict:
print("Update conflict, skipping: %s" % title)
return
except Exception:
print("Error handling title %r" % title)
traceback.print_exc()
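    # Remember recently processed titles so duplicates returned by the listing are skipped.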
seen = pylru.lrucache(10000)
def ipages(pages):
for index, page in enumerate(pages):
title = page.name
print("%7s %s" % (index, title))
if title in seen:
print("Already saw %s, skipping" % (title,))
continue
seen[title] = True
update_session(title)
yield page
with flock(
os.path.join(
tempfile.gettempdir(), hashlib.sha1(host.encode("utf-8")).hexdigest()
)
):
if args.speed and not args.delay:
pool = ThreadPool(processes=args.speed * 2)
for _result in pool.imap(process, ipages(pages)):
pass
else:
for page in ipages(pages):
process(page)
if __name__ == "__main__":
main()
|
itkach/mwscrape
|
mwscrape/scrape.py
|
Python
|
mpl-2.0
| 17,994
|
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
# This is a template config file for web-platform-tests test.
import os
import sys
config = {
"options": [
"--prefs-root=%(test_path)s/prefs",
"--processes=1",
"--config=%(test_path)s/wptrunner.ini",
"--ca-cert-path=%(test_path)s/certs/cacert.pem",
"--host-key-path=%(test_path)s/certs/web-platform.test.key",
"--host-cert-path=%(test_path)s/certs/web-platform.test.pem",
"--certutil-binary=%(test_install_path)s/bin/certutil",
],
"exes": {
'python': sys.executable,
'virtualenv': [sys.executable, 'c:/mozilla-build/buildbotve/virtualenv.py'],
'hg': 'c:/mozilla-build/hg/hg',
'mozinstall': ['%s/build/venv/scripts/python' % os.getcwd(),
'%s/build/venv/scripts/mozinstall-script.py' % os.getcwd()],
'tooltool.py': [sys.executable, 'C:/mozilla-build/tooltool.py'],
},
"find_links": [
"http://pypi.pvt.build.mozilla.org/pub",
"http://pypi.pub.build.mozilla.org/pub",
],
"pip_index": False,
"buildbot_json_path": "buildprops.json",
"default_blob_upload_servers": [
"https://blobupload.elasticbeanstalk.com",
],
"blob_uploader_auth_file" : os.path.join(os.getcwd(), "oauth.txt"),
"download_minidump_stackwalk": True,
}
|
cstipkovic/spidermonkey-research
|
testing/mozharness/configs/web_platform_tests/prod_config_windows.py
|
Python
|
mpl-2.0
| 1,629
|
from FireGirlOptimizer import *
FGPO = FireGirlPolicyOptimizer()
###To create, uncomment the following two lines:
FGPO.createFireGirlPathways(10,50)
#FGPO.saveFireGirlPathways("FG_pathways_20x50.fgl")
###To load (already created data), uncomment the following line
#FGPO.loadFireGirlPathways("FG_pathways_20x50.fgl")
#Setting Flags
FGPO.NORMALIZED_WEIGHTS_OBJ_FN = False
FGPO.NORMALIZED_WEIGHTS_F_PRIME = False
FGPO.AVERAGED_WEIGHTS_OBJ_FN = True
FGPO.AVERAGED_WEIGHTS_F_PRIME = True
print(" ")
print("Initial Values")
print("objfn: " + str(FGPO.calcObjFn()))
print("fprme: " + str(FGPO.calcObjFPrime()))
print("weights: " + str(FGPO.pathway_weights))
print("net values: " + str(FGPO.pathway_net_values))
#setting new policy
b = [0,0,0,0,0,0,0,0,0,0,0]
pol = FireGirlPolicy(b)
FGPO.setPolicy(pol)
print(" ")
###To Optimize, uncomment the following
print("Beginning Optimization Routine")
FGPO.USE_AVE_PROB = False
output=FGPO.optimizePolicy()
FGPO.printOptOutput(output)
print(" ")
print("Final Values")
print("objfn: " + str(FGPO.calcObjFn()))
print("fprme: " + str(FGPO.calcObjFPrime()))
print("weights: " + str(FGPO.pathway_weights))
print("net values: " + str(FGPO.pathway_net_values))
|
buckinha/gravity
|
deprecated/test_script_optimizer2.py
|
Python
|
mpl-2.0
| 1,197
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP - Account balance reporting engine
# Copyright (C) 2009 Pexego Sistemas Informáticos.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Account balance report templates
Generic account balance report template that will be used to define
accounting concepts with formulas to calculate its values/balance.
Designed following the needs of the Spanish/Spain localization.
"""
from openerp.osv import orm, fields
from openerp.tools.translate import _
_BALANCE_MODE_HELP = """
Formula calculation mode: Depending on it, the balance is calculated as
follows:
Mode 0: debit-credit (default);
Mode 1: debit-credit, credit-debit for accounts in brackets;
Mode 2: credit-debit;
Mode 3: credit-debit, debit-credit for accounts in brackets."""
_VALUE_FORMULA_HELP = """
Value calculation formula: Depending on this formula the final value is
calculated as follows:
Empty template value: sum of this concept's children values.
Number with decimal point ("10.2"): that value (constant).
Account numbers separated by commas ("430,431,(437)"): Sum of the account
balances (the sign of the balance depends on the balance mode).
Concept codes separated by "+" ("11000+12000"): Sum of those concepts values.
"""
# CSS classes for the account lines
CSS_CLASSES = [('default', 'Default'),
('l1', 'Level 1'),
('l2', 'Level 2'),
('l3', 'Level 3'),
('l4', 'Level 4'),
('l5', 'Level 5')]
class account_balance_reporting_template(orm.Model):
"""Account balance report template.
It stores the header fields of an account balance report template,
and the linked lines of detail with the formulas to calculate
the accounting concepts of the report.
"""
_name = "account.balance.reporting.template"
_columns = {
'name': fields.char('Name', size=64, required=True, select=True),
'type': fields.selection([('system', 'System'),
('user', 'User')], 'Type'),
'report_xml_id': fields.many2one('ir.actions.report.xml',
'Report design', ondelete='set null'),
'description': fields.text('Description'),
'balance_mode': fields.selection(
[('0', 'Debit-Credit'),
('1', 'Debit-Credit, reversed with brackets'),
('2', 'Credit-Debit'),
('3', 'Credit-Debit, reversed with brackets')],
'Balance mode', help=_BALANCE_MODE_HELP),
'line_ids': fields.one2many('account.balance.reporting.template.line',
'template_id', 'Lines'),
}
_defaults = {
'type': 'user',
'balance_mode': '0',
}
def copy(self, cr, uid, rec_id, default=None, context=None):
"""Redefine the copy method to perform it correctly as the line
structure is a graph.
"""
line_obj = self.pool['account.balance.reporting.template.line']
# Read the current item data:
template = self.browse(cr, uid, rec_id, context=context)
# Create the template
new_id = self.create(
cr, uid, {
'name': '%s*' % template.name,
'type': 'user', # Copies are always user templates
'report_xml_id': template.report_xml_id.id,
'description': template.description,
'balance_mode': template.balance_mode,
'line_ids': None,
}, context=context)
# Now create the lines (without parents)
for line in template.line_ids:
line_obj.create(
cr, uid, {
'template_id': new_id,
'sequence': line.sequence,
'css_class': line.css_class,
'code': line.code,
'name': line.name,
'current_value': line.current_value,
'previous_value': line.previous_value,
'negate': line.negate,
'parent_id': None,
'child_ids': None,
}, context=context)
# Now set the (lines) parents
for line in template.line_ids:
if line.parent_id:
# Search for the copied line
new_line_id = line_obj.search(
cr, uid, [('template_id', '=', new_id),
('code', '=', line.code)], context=context)[0]
# Search for the copied parent line
new_parent_id = line_obj.search(
cr, uid, [
('template_id', '=', new_id),
('code', '=', line.parent_id.code),
], context=context)[0]
# Set the parent
line_obj.write(cr, uid, new_line_id,
{'parent_id': new_parent_id}, context=context)
return new_id
class account_balance_reporting_template_line(orm.Model):
"""
Account balance report template line / Accounting concept template
One line of detail of the balance report representing an accounting
concept with the formulas to calculate its values.
The accounting concepts follow a parent-children hierarchy.
"""
_name = "account.balance.reporting.template.line"
_columns = {
'template_id': fields.many2one('account.balance.reporting.template',
'Template', ondelete='cascade'),
'sequence': fields.integer(
'Sequence', required=True,
help="Lines will be sorted/grouped by this field"),
'css_class': fields.selection(CSS_CLASSES, 'CSS Class', required=False,
help="Style-sheet class"),
'code': fields.char('Code', size=64, required=True, select=True,
help="Concept code, may be used on formulas to "
"reference this line"),
'name': fields.char('Name', size=256, required=True, select=True,
help="Concept name/description"),
'current_value': fields.text('Fiscal year 1 formula',
help=_VALUE_FORMULA_HELP),
'previous_value': fields.text('Fiscal year 2 formula',
help=_VALUE_FORMULA_HELP),
'negate': fields.boolean(
'Negate',
help="Negate the value (change the sign of the balance)"),
'parent_id': fields.many2one('account.balance.reporting.template.line',
'Parent', ondelete='cascade'),
'child_ids': fields.one2many('account.balance.reporting.template.line',
'parent_id', 'Children'),
}
_defaults = {
'template_id': (lambda self, cr, uid, context=None:
context.get('template_id', None)),
'negate': False,
'css_class': 'default',
'sequence': 10,
}
_order = "sequence, code"
_sql_constraints = [
('report_code_uniq', 'unique(template_id, code)',
_("The code must be unique for this report!"))
]
def name_get(self, cr, uid, ids, context=None):
"""
Redefine the name_get method to show the code in the name
("[code] name").
"""
if context is None:
context = {}
res = []
for item in self.browse(cr, uid, ids, context=context):
res.append((item.id, "[%s] %s" % (item.code, item.name)))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike',
context=None, limit=80):
"""Redefine the name_search method to allow searching by code."""
if context is None:
context = {}
if args is None:
args = []
ids = []
if name:
ids = self.search(cr, uid, [('code', 'ilike', name)] + args,
limit=limit, context=context)
if not ids:
ids = self.search(cr, uid, [('name', operator, name)] + args,
limit=limit, context=context)
return self.name_get(cr, uid, ids, context=context)
|
jaumemarti/l10n-spain-txerpa
|
account_balance_reporting/account_balance_reporting_template.py
|
Python
|
agpl-3.0
| 9,107
|
"""Added initial tables
Revision ID: initial
Revises: None
Create Date: 2013-10-25 16:52:12.150570
"""
# revision identifiers, used by Alembic.
revision = 'initial'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table(u'radio_language',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('iso639_1', sa.String(length=2), nullable=True),
sa.Column('iso639_2', sa.String(length=3), nullable=True),
sa.Column('locale_code', sa.String(length=10), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_network',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('about', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_recording',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('url', sa.String(length=160), nullable=True),
sa.Column('local_file', sa.String(length=160), nullable=True),
sa.Column('created_time', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user_details',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('age', sa.Integer(), nullable=True),
sa.Column('phone', sa.String(length=100), nullable=True),
sa.Column('url', sa.String(length=100), nullable=True),
sa.Column('location', sa.String(length=100), nullable=True),
sa.Column('bio', sa.String(length=100), nullable=True),
sa.Column('gender_code', sa.Integer(), nullable=True),
sa.Column('created_time', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'telephony_phonenumber',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('carrier', sa.String(length=100), nullable=True),
sa.Column('countrycode', sa.String(length=3), nullable=True),
sa.Column('number', sa.String(length=20), nullable=False),
sa.Column('raw_number', sa.String(length=20), nullable=False),
sa.Column('number_type', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'radio_programtype',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('definition', sa.PickleType(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'radio_location',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('municipality', sa.String(length=100), nullable=True),
sa.Column('district', sa.String(length=100), nullable=True),
sa.Column('modifieddate', sa.Date(), nullable=True),
sa.Column('country', sa.String(length=100), nullable=True),
sa.Column('addressline1', sa.String(length=100), nullable=True),
sa.Column('addressline2', sa.String(length=100), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user_user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('email', sa.String(length=100), nullable=False),
sa.Column('openid', sa.String(length=100), nullable=True),
sa.Column('activation_key', sa.String(length=100), nullable=True),
sa.Column('created_time', sa.DateTime(), nullable=True),
sa.Column('last_accessed', sa.DateTime(), nullable=True),
sa.Column('avatar', sa.String(length=100), nullable=True),
sa.Column('password', sa.String(length=300), nullable=False),
sa.Column('role_code', sa.SmallInteger(), nullable=True),
sa.Column('status_code', sa.SmallInteger(), nullable=True),
sa.Column('user_detail_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_detail_id'], ['user_details.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('name'),
sa.UniqueConstraint('openid')
)
op.create_table('radio_person',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=8), nullable=True),
sa.Column('firstname', sa.String(length=100), nullable=True),
sa.Column('middlename', sa.String(length=100), nullable=True),
sa.Column('lastname', sa.String(length=100), nullable=True),
sa.Column('email', sa.String(length=100), nullable=True),
sa.Column('additionalcontact', sa.String(length=100), nullable=True),
sa.Column('phone_id', sa.Integer(), nullable=True),
sa.Column('gender_code', sa.Integer(), nullable=True),
sa.Column('privacy_code', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['phone_id'], ['telephony_phonenumber.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_program',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('duration', sa.Time(), nullable=True),
sa.Column('update_recurrence', sa.Text(), nullable=True),
sa.Column('language_id', sa.Integer(), nullable=True),
sa.Column('program_type_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['language_id'], ['radio_language.id'], ),
sa.ForeignKeyConstraint(['program_type_id'], ['radio_programtype.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_station',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('about', sa.Text(), nullable=True),
sa.Column('frequency', sa.Float(), nullable=True),
sa.Column('owner_id', sa.Integer(), nullable=True),
sa.Column('network_id', sa.Integer(), nullable=True),
sa.Column('location_id', sa.Integer(), nullable=True),
sa.Column('cloud_phone_id', sa.Integer(), nullable=True),
sa.Column('transmitter_phone_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['cloud_phone_id'], ['telephony_phonenumber.id'], ),
sa.ForeignKeyConstraint(['location_id'], ['radio_location.id'], ),
sa.ForeignKeyConstraint(['network_id'], ['radio_network.id'], ),
sa.ForeignKeyConstraint(['owner_id'], ['user_user.id'], ),
sa.ForeignKeyConstraint(['transmitter_phone_id'], ['telephony_phonenumber.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'radio_networkadmins',
sa.Column(u'user_id', sa.Integer(), nullable=True),
sa.Column(u'network_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['radio_network.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user_user.id'], ),
sa.PrimaryKeyConstraint()
)
op.create_table(u'radio_personlanguage',
sa.Column(u'language_id', sa.Integer(), nullable=True),
sa.Column(u'person_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['language_id'], ['radio_language.id'], ),
sa.ForeignKeyConstraint(['person_id'], ['radio_person.id'], ),
sa.PrimaryKeyConstraint()
)
op.create_table('radio_episode',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('program_id', sa.Integer(), nullable=False),
sa.Column('recording_id', sa.Integer(), nullable=True),
sa.Column('created_time', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['program_id'], ['radio_program.id'], ),
sa.ForeignKeyConstraint(['recording_id'], ['radio_recording.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_scheduledepisode',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('station_id', sa.Integer(), nullable=True),
sa.Column('episode_id', sa.Integer(), nullable=True),
sa.Column('start', sa.DateTime(), nullable=True),
sa.Column('end', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['episode_id'], ['radio_episode.id'], ),
sa.ForeignKeyConstraint(['station_id'], ['radio_station.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_scheduledblock',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('recurrence', sa.Text(), nullable=True),
sa.Column('start_time', sa.Time(), nullable=True),
sa.Column('end_time', sa.Time(), nullable=True),
sa.Column('station_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['station_id'], ['radio_station.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'radio_role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('person_id', sa.Integer(), nullable=True),
sa.Column('station_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['person_id'], ['radio_person.id'], ),
sa.ForeignKeyConstraint(['station_id'], ['radio_station.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'radio_stationlanguage',
sa.Column(u'language_id', sa.Integer(), nullable=True),
sa.Column(u'station_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['language_id'], ['radio_language.id'], ),
sa.ForeignKeyConstraint(['station_id'], ['radio_station.id'], ),
sa.PrimaryKeyConstraint()
)
op.create_table('onair_episode',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_time', sa.DateTime(), nullable=True),
sa.Column('scheduledepisode_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['scheduledepisode_id'], ['radio_scheduledepisode.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_paddingcontent',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('recording_id', sa.Integer(), nullable=True),
sa.Column('block_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['block_id'], ['radio_scheduledblock.id'], ),
sa.ForeignKeyConstraint(['recording_id'], ['radio_recording.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('radio_blockedprogram',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('station_id', sa.Integer(), nullable=True),
sa.Column('program_id', sa.Integer(), nullable=True),
sa.Column('block_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['block_id'], ['radio_scheduledblock.id'], ),
sa.ForeignKeyConstraint(['program_id'], ['radio_program.id'], ),
sa.ForeignKeyConstraint(['station_id'], ['radio_station.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'radio_networkpadding',
sa.Column(u'network_id', sa.Integer(), nullable=True),
sa.Column(u'paddingcontent_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['radio_network.id'], ),
sa.ForeignKeyConstraint(['paddingcontent_id'], ['radio_paddingcontent.id'], ),
sa.PrimaryKeyConstraint()
)
op.create_table(u'telephony_call',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('call_uuid', sa.String(length=100), nullable=True),
sa.Column('start_time', sa.DateTime(), nullable=True),
sa.Column('end_time', sa.DateTime(), nullable=True),
sa.Column('from_phonenumber_id', sa.Integer(), nullable=True),
sa.Column('to_phonenumber_id', sa.Integer(), nullable=True),
sa.Column('a_leg_uuid', sa.String(length=100), nullable=True),
sa.Column('a_leg_request_uuid', sa.String(length=100), nullable=True),
sa.Column('onairepisode_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['from_phonenumber_id'], ['telephony_phonenumber.id'], ),
sa.ForeignKeyConstraint(['onairepisode_id'], ['onair_episode.id'], ),
sa.ForeignKeyConstraint(['to_phonenumber_id'], ['telephony_phonenumber.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table(u'telephony_message',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('message_uuid', sa.String(length=100), nullable=True),
sa.Column('sendtime', sa.DateTime(), nullable=True),
sa.Column('text', sa.String(length=160), nullable=True),
sa.Column('from_phonenumber_id', sa.Integer(), nullable=True),
sa.Column('to_phonenumber_id', sa.Integer(), nullable=True),
sa.Column('onairepisode_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['from_phonenumber_id'], ['telephony_phonenumber.id'], ),
sa.ForeignKeyConstraint(['onairepisode_id'], ['onair_episode.id'], ),
sa.ForeignKeyConstraint(['to_phonenumber_id'], ['telephony_phonenumber.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table(u'telephony_message')
op.drop_table(u'telephony_call')
op.drop_table(u'radio_networkpadding')
op.drop_table('radio_blockedprogram')
op.drop_table('radio_paddingcontent')
op.drop_table('onair_episode')
op.drop_table(u'radio_stationlanguage')
op.drop_table(u'radio_role')
op.drop_table('radio_scheduledblock')
op.drop_table('radio_scheduledepisode')
op.drop_table('radio_episode')
op.drop_table(u'radio_personlanguage')
op.drop_table(u'radio_networkadmins')
op.drop_table('radio_station')
op.drop_table('radio_program')
op.drop_table('radio_person')
op.drop_table('user_user')
op.drop_table(u'radio_location')
op.drop_table(u'radio_programtype')
op.drop_table(u'telephony_phonenumber')
op.drop_table('user_details')
op.drop_table('radio_recording')
op.drop_table('radio_network')
op.drop_table(u'radio_language')
### end Alembic commands ###
|
rootio/rootio_web
|
alembic/versions/initial_added_tables.py
|
Python
|
agpl-3.0
| 13,787
|
# -*- coding: utf-8 -*-
# This file is part of PrawoKultury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from datetime import datetime
from django.core.mail import send_mail, mail_managers
from django.conf import settings
from django.contrib.sites.models import Site
from django.db import models
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _, override
import getpaid
from migdal.models import Entry
from . import app_settings
class Offer(models.Model):
""" A fundraiser for a particular book. """
entry = models.OneToOneField(Entry, models.CASCADE) # filter publications!
price = models.DecimalField(_('price'), decimal_places=2, max_digits=6)
cost_const = models.DecimalField(decimal_places=2, max_digits=6)
cost_per_item = models.DecimalField(decimal_places=2, max_digits=6, default=0)
class Meta:
verbose_name = _('offer')
verbose_name_plural = _('offers')
ordering = ['entry']
def __unicode__(self):
return unicode(self.entry)
def get_absolute_url(self):
return self.entry.get_absolute_url()
def total_per_item(self):
return self.price + self.cost_per_item
def price_per_items(self, items):
return self.cost_const + items * self.total_per_item()
class Order(models.Model):
""" A person paying for a book.
The payment was completed if and only if payed_at is set.
"""
offer = models.ForeignKey(Offer, models.CASCADE, verbose_name=_('offer'))
items = models.IntegerField(verbose_name=_('items'), default=1)
name = models.CharField(_('name'), max_length=127, blank=True)
email = models.EmailField(_('email'), db_index=True)
address = models.TextField(_('address'), db_index=True)
payed_at = models.DateTimeField(_('payed at'), null=True, blank=True, db_index=True)
    language_code = models.CharField(max_length=2, null=True, blank=True)
class Meta:
verbose_name = _('order')
verbose_name_plural = _('orders')
ordering = ['-payed_at']
def __unicode__(self):
return "%s (%d egz.)" % (unicode(self.offer), self.items)
def get_absolute_url(self):
return self.offer.get_absolute_url()
def amount(self):
return self.offer.price_per_items(self.items)
def notify(self, subject, template_name, extra_context=None):
context = {
'order': self,
'site': Site.objects.get_current(),
}
if extra_context:
context.update(extra_context)
with override(self.language_code or app_settings.DEFAULT_LANGUAGE):
send_mail(subject,
render_to_string(template_name, context),
getattr(settings, 'CONTACT_EMAIL', 'prawokultury@nowoczesnapolska.org.pl'),
[self.email],
fail_silently=False
)
def notify_managers(self, subject, template_name, extra_context=None):
context = {
'order': self,
'site': Site.objects.get_current(),
}
if extra_context:
context.update(extra_context)
with override(app_settings.DEFAULT_LANGUAGE):
mail_managers(subject, render_to_string(template_name, context))
# Register the Order model with django-getpaid for payments.
getpaid.register_to_payment(Order, unique=False, related_name='payment')
def new_payment_query_listener(sender, order=None, payment=None, **kwargs):
""" Set payment details for getpaid. """
payment.amount = order.amount()
payment.currency = 'PLN'
getpaid.signals.new_payment_query.connect(new_payment_query_listener)
def user_data_query_listener(sender, order, user_data, **kwargs):
""" Set user data for payment. """
user_data['email'] = order.email
getpaid.signals.user_data_query.connect(user_data_query_listener)
def payment_status_changed_listener(sender, instance, old_status, new_status, **kwargs):
""" React to status changes from getpaid. """
if old_status != 'paid' and new_status == 'paid':
instance.order.payed_at = datetime.now()
instance.order.save()
instance.order.notify(
_('Your payment has been completed.'),
'shop/email/payed.txt'
)
instance.order.notify_managers(
_('New order has been placed.'),
'shop/email/payed_managers.txt'
)
getpaid.signals.payment_status_changed.connect(payment_status_changed_listener, dispatch_uid='shop.models.payment_status_changed_listener')
|
fnp/prawokultury
|
shop/models.py
|
Python
|
agpl-3.0
| 4,642
|
#
# Author: Jorg Bornschein <bornschein@fias.uni-frankfurt.de>
# License: Academic Free License (AFL) v3.0
#
from __future__ import division
import numpy as np
from math import pi
from scipy.misc import comb
from mpi4py import MPI
import pulp.em as em
import pulp.utils.parallel as parallel
import pulp.utils.tracing as tracing
import pulp.utils.accel as accel
from pulp.utils.datalog import dlog
from pulp.em.camodels import CAModel
from pulp.utils.autotable import AutoTable
class MMCA_ET(CAModel):
def __init__(self, D, H, Hprime, gamma, to_learn=['W', 'pi', 'sigma'], comm=MPI.COMM_WORLD):
""" MMCA-ET init method.
Takes data dimension *D*, number of hidden causes *H*,
and ET approximation parameters *Hprime* and *gamma*. Optional
list of parameters *to_learn* and MPI *comm* object.
"""
CAModel.__init__(self, D, H, Hprime, gamma, to_learn, comm)
#
self.rho_T_bound = 1.20 # for rho: never use a T smaller than this
self.rho_lbound = 1 # for rho: never use a rho smaller than this
self.rho_ubound = 35 # for rho: never use a rho larger than this
self.tol = 1e-4 # for W: ensure W[W<tol] = tol
self.rev_corr = False
# Noise Policy
tol = self.tol
self.noise_policy = {
'W' : ( -np.inf, +np.inf, False ),
'pi' : ( tol, 1-tol, False ),
'sigma': ( tol, +np.inf, False )
}
# XXX debugging XXX
#self.tbl = AutoTable("mmca-debug/mmca-debug-%04d.h5" % comm.rank)
#self.last_candidates = None
@tracing.traced
def check_params(self, model_params):
"""
Sanity-check the given model parameters. Raises an exception if something
is severely wrong.
"""
# XXX
#model_params = CAModel.check_params(self, model_params)
tol = self.tol
W = model_params['W']
# Ensure |W| >= tol
W[np.logical_and(W >= 0., W < +tol)] = +tol
W[np.logical_and(W <= 0., W > -tol)] = -tol
return model_params
@tracing.traced
def generate_from_hidden(self, model_params, my_hdata):
"""
Generate data according to the MCA model while the latents are
given in my_hdata['s'].
"""
W = model_params['W']
pies = model_params['pi']
sigma = model_params['sigma']
H, D = W.shape
s = my_hdata['s']
my_N, _ = s.shape
# Create output arrays, y is data
y = np.zeros( (my_N, D) )
for n in xrange(my_N):
            # Combine according to magnitude-max rule
t0 = s[n, :, None] * W # (H, D) "stacked" version of a datapoint
idx = np.argmax(np.abs(t0), axis=0) # Find maximum magnitude in stack
            y[n] = t0[idx].diagonal() # Collapse it
# Add noise according to the model parameters
y += np.random.normal( scale=sigma, size=(my_N, D) )
# Build return structure
return { 'y': y, 's': s }
@tracing.traced
def select_Hprimes(self, model_params, data):
"""
Return a new data-dictionary which has been annotated with
a data['candidates'] dataset. A set of self.Hprime candidates
will be selected.
"""
comm = self.comm
my_y = data['y']
my_N, _ = my_y.shape
H, Hprime = self.H, self.Hprime
W = model_params['W']
#if self.last_candidates is not None:
# print "Reusing candidates"
# data['candidates'] = self.last_candidates
# return data
# Allocate return structure
candidates = np.zeros( (my_N, Hprime), dtype=np.int )
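        # For each datapoint, pick the Hprime hidden units whose weight vectors are closest (in squared distance) to y[n].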
for n in xrange(my_N):
#W_interm = np.maximum(W, my_y[n])
#sim = np.abs(W_interm-my_y[n]).sum(axis=1)
sim = ((W-my_y[n])**2).sum(axis=1)
candidates[n] = np.argsort(sim)[0:Hprime]
data['candidates'] = candidates
#self.last_candidates = candidates
return data
@tracing.traced
def E_step(self, anneal, model_params, my_data):
""" MCA E_step
my_data variables used:
my_data['y'] Datapoints
        my_data['candidates'] Candidate H's according to selection func.
        Annealing variables used:
        anneal['T'] Temperature for det. annealing AND softmax
        anneal['Ncut_factor'] 0.: no truncation; 1. trunc. according to model
"""
comm = self.comm
my_y = my_data['y']
my_cand = my_data['candidates']
my_N, D = my_data['y'].shape
H = self.H
state_mtx = self.state_matrix # shape: (no_states, Hprime)
state_abs = self.state_abs # shape: (no_states,)
no_states = len(state_abs)
W = model_params['W']
pies = model_params['pi']
sigma = model_params['sigma']
# Disable some warnings
old_seterr = np.seterr(divide='ignore', under='ignore')
# Precompute
T = anneal['T']
T_rho = np.maximum(T, self.rho_T_bound)
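        # rho parameterizes the smooth approximation of the magnitude-max rule; larger rho approaches a hard max.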
rho = 1./(1.-1./T_rho)
rho = np.maximum(np.minimum(rho, self.rho_ubound), self.rho_lbound)
beta = 1./T
pre1 = -1./2./sigma/sigma
pil_bar = np.log( pies/(1.-pies) )
Wl = accel.log(np.abs(W))
Wrho = accel.exp(rho * Wl)
Wrhos = np.sign(W) * Wrho
# Allocate return structures
F = np.empty( [my_N, 1+H+no_states] )
# Iterate over all datapoints
tracing.tracepoint("E_step:iterating...")
for n in xrange(my_N):
y = my_y[n,:]
cand = my_cand[n,:]
# Zero active hidden causes
log_prod_joint = pre1 * (y**2).sum()
F[n,0] = log_prod_joint
# Hidden states with one active cause
log_prod_joint = pil_bar + pre1 * ((W-y)**2).sum(axis=1)
F[n,1:H+1] = log_prod_joint
# Handle hidden states with more than 1 active cause
log_prior = pil_bar * state_abs # is (no_states,)
Wrhos_ = Wrhos[cand] # is (Hprime, D)
t0 = np.dot(state_mtx, Wrhos_)
Wbar = np.sign(t0) * accel.exp(accel.log(np.abs(t0))/rho)
log_prod_joint = log_prior + pre1 * ((Wbar-y)**2).sum(axis=1)
F[n,1+H:] = log_prod_joint
assert np.isfinite(F).all()
# Restore np.seterr
np.seterr(**old_seterr)
return { 'logpj': F }
@tracing.traced
def M_step(self, anneal, model_params, my_suff_stat, my_data):
""" MCA M_step
my_data variables used:
my_data['y'] Datapoints
my_data['candidates'] Candidate H's according to selection func.
Annealing variables used:
anneal['T'] Temperature for det. annealing AND softmax
        anneal['Ncut_factor'] 0.: no truncation; 1. trunc. according to model
"""
comm = self.comm
H, Hprime = self.H, self.Hprime
gamma = self.gamma
W = model_params['W']
pies = model_params['pi']
sigma = model_params['sigma']
# Read in data:
my_y = my_data['y']
my_cand = my_data['candidates']
my_logpj = my_suff_stat['logpj']
my_N, D = my_y.shape
N = comm.allreduce(my_N)
state_mtx = self.state_matrix # shape: (no_states, Hprime)
state_abs = self.state_abs # shape: (no_states,)
no_states = len(state_abs)
# Disable some warnings
old_seterr = np.seterr(divide='ignore', under='ignore')
# To compute et_loglike:
my_ldenom_sum = 0.0
ldenom_sum = 0.0
# Precompute
T = anneal['T']
T_rho = np.maximum(T, self.rho_T_bound)
rho = 1./(1.-1./T_rho)
rho = np.maximum(np.minimum(rho, self.rho_ubound), self.rho_lbound)
beta = 1./T
pre1 = -1./2./sigma/sigma
pil_bar = np.log( pies/(1.-pies) )
Wl = accel.log(np.abs(W))
Wrho = accel.exp(rho * Wl)
Wrhos = np.sign(W) * Wrho
Wsquared = W*W
# Some asserts
assert np.isfinite(pil_bar).all()
assert np.isfinite(Wl).all()
assert np.isfinite(Wrho).all()
assert (Wrho > 1e-86).all()
my_corr = beta*((my_logpj).max(axis=1)) # shape: (my_N,)
my_logpjb = beta*my_logpj - my_corr[:, None] # shape: (my_N, no_states)
my_pj = accel.exp(my_logpj) # shape: (my_N, no_states)
my_pjb = accel.exp(my_logpjb) # shape: (my_N, no_states)
# Precompute factor for pi update and ET cutting
A_pi_gamma = 0.; B_pi_gamma = 0.
for gp in xrange(0, self.gamma+1):
a = comb(H, gp) * pies**gp * (1.-pies)**(H-gp)
A_pi_gamma += a
B_pi_gamma += gp * a
# Truncate data
if anneal['Ncut_factor'] > 0.0:
tracing.tracepoint("M_step:truncating")
my_logdenoms = accel.log(my_pjb.sum(axis=1)) + my_corr
N_use = int(N * (1 - (1 - A_pi_gamma) * anneal['Ncut_factor']))
cut_denom = parallel.allsort(my_logdenoms)[-N_use]
my_sel, = np.where(my_logdenoms >= cut_denom)
my_N, = my_sel.shape
N_use = comm.allreduce(my_N)
else:
my_N,_ = my_y.shape
my_sel = np.arange(my_N)
N_use = N
# Allocate suff-stat arrays
my_Wp = np.zeros_like(W) # shape (H, D)
my_Wq = np.zeros_like(W) # shape (H, D)
my_pi = 0.0 #
my_sigma = 0.0 #
# Do reverse correlation if requested
if self.rev_corr:
my_y_rc = my_data['y_rc']
D_rev_corr = my_y_rc.shape[1]
my_rev_corr = np.zeros( (H,D_rev_corr) )
my_rev_corr_count = np.zeros(H)
# Iterate over all datapoints
tracing.tracepoint("M_step:iterating...")
dlog.append('N_use', N_use)
for n in my_sel:
y = my_y[n,:] # shape (D,)
cand = my_cand[n,:] # shape (Hprime,)
logpj = my_logpj[n,:] # shape (no_states,)
logpjb = my_logpjb[n,:] # shape (no_states,)
pj = my_pj[n,:] # shape (no_states,)
pjb = my_pjb[n,:] # shape (no_states,)
this_Wp = np.zeros_like(W) # numerator for W (current datapoint) (H, D)
this_Wq = np.zeros_like(W) # denominator for W (current datapoint) (H, D)
this_pi = 0.0 # numerator for pi update (current datapoint)
this_sigma = 0.0 # numerator for gamma update (current datapoint)
# Zero active hidden causes
# this_Wp += 0. # nothing to do
# this_Wq += 0. # nothing to do
# this_pi += 0. # nothing to do
this_sigma += pjb[0] * (y**2).sum()
# One active hidden cause
this_Wp += (pjb[1:(H+1),None]) * y[None, :]
this_Wq += (pjb[1:(H+1),None])
this_pi += pjb[1:(H+1)].sum()
this_sigma += (pjb[1:(H+1)] * ((W-y)**2).sum(axis=1)).sum()
# Handle hidden states with more than 1 active cause
W_ = W[cand] # is (Hprime, D)
Wl_ = Wl[cand] # is ( " ")
Wrho_ = Wrho[cand] # is ( " ")
Wrhos_ = Wrhos[cand] # is ( " ")
#Wbar = calc_Wbar(state_mtx, W_)
#Wlbar = np.log(np.abs(Wbar))
t0 = np.dot(state_mtx, Wrhos_)
Wlbar = accel.log(np.abs(t0)) / rho # is (no_states, D)
#Wlbar = np.maximum(Wlbar, -9.21)
Wbar = np.sign(t0)*accel.exp(Wlbar) # is (no_states, D)
t = Wlbar[:, None, :]-Wl_[None, :, :]
t = np.maximum(t, 0.)
Aid = state_mtx[:,:, None] * accel.exp(logpjb[H+1:,None,None] - (rho-1)*t)
Aid = Aid.sum(axis=0)
#Aid = calc_Aid(logpjb[H+1:], W_, Wl_, state_mtx, Wbar, Wlbar, rho)
#assert np.isfinite(Wlbar).all()
#assert np.isfinite(Wbar).all()
#assert np.isfinite(pjb).all()
#assert np.isfinite(Aid).all()
this_Wp[cand] += Aid * y[None, :]
this_Wq[cand] += Aid
this_pi += (pjb[1+H:] * state_abs).sum()
this_sigma += (pjb[1+H:] * ((Wbar-y)**2).sum(axis=1)).sum()
denom = pjb.sum()
my_Wp += this_Wp / denom
my_Wq += this_Wq / denom
my_pi += this_pi / denom
my_sigma += this_sigma / denom
#self.tbl.append("logpj", logpj)
#self.tbl.append("corr", my_corr[n])
#self.tbl.append("denom", denom)
#self.tbl.append("cand", cand)
#self.tbl.append("Aid", Aid)
my_ldenom_sum += accel.log(np.sum(accel.exp(logpj))) #For loglike computation
# Estimate reverse correlation
if self.rev_corr:
pys = pjb / denom
if np.isfinite(pys).all():
my_rev_corr += pys[1:H+1, None]*my_y_rc[n,None,:]
my_rev_corr_count += pys[1:H+1]
my_rev_corr[cand] += np.sum(state_mtx[:,:,None]*pys[H+1:,None,None]*my_y_rc[n,None,:], axis=0)
my_rev_corr_count[cand] += np.sum(state_mtx[:,:]*pys[H+1,None], axis=0)
else:
print "Not all finite rev_corr %d" % n
# Calculate updated W
if 'W' in self.to_learn:
tracing.tracepoint("M_step:update W")
Wp = np.empty_like(my_Wp)
Wq = np.empty_like(my_Wq)
assert np.isfinite(my_Wp).all()
assert np.isfinite(my_Wq).all()
comm.Allreduce( [my_Wp, MPI.DOUBLE], [Wp, MPI.DOUBLE] )
comm.Allreduce( [my_Wq, MPI.DOUBLE], [Wq, MPI.DOUBLE] )
            # Make sure we do not divide by zero
tiny = self.tol
Wq[Wq < tiny] = tiny
# Calculate updated W
W_new = Wp / Wq
# Add inertia depending on Wq
alpha = 2.5
inertia = np.maximum(1. - accel.exp(-Wq / alpha), 0.2)
W_new = inertia*W_new + (1-inertia)*W
else:
W_new = W
# Calculate updated pi
if 'pi' in self.to_learn:
tracing.tracepoint("M_step:update pi")
assert np.isfinite(my_pi).all()
pi_new = A_pi_gamma / B_pi_gamma * pies * comm.allreduce(my_pi) / N_use
else:
pi_new = pies
# Calculate updated sigma
if 'sigma' in self.to_learn: # TODO: XXX see LinCA XXX (merge!)
tracing.tracepoint("M_step:update sigma")
assert np.isfinite(my_sigma).all()
sigma_new = np.sqrt(comm.allreduce(my_sigma) / D / N_use)
else:
sigma_new = sigma
# Put all together and compute (always) et_approx_likelihood
ldenom_sum = comm.allreduce(my_ldenom_sum)
lAi = (H * np.log(1. - pi_new)) - ((D/2) * np.log(2*pi)) -( D * np.log(sigma_new))
        # For practical and ET-approximation reasons we use: sum of restricted responsibilities = 1
loglike_et = (lAi * N_use) + ldenom_sum
if self.rev_corr:
rev_corr = np.empty_like(my_rev_corr)
rev_corr_count = np.empty_like(my_rev_corr_count)
comm.Allreduce( [my_rev_corr, MPI.DOUBLE], [rev_corr, MPI.DOUBLE])
comm.Allreduce( [my_rev_corr_count, MPI.DOUBLE], [rev_corr_count, MPI.DOUBLE])
rev_corr /= (1e-16+rev_corr_count[:,None])
else:
rev_corr = np.zeros( (H, D) )
# Restore np.seterr
np.seterr(**old_seterr)
return { 'W': W_new, 'pi': pi_new, 'sigma': sigma_new , 'rev_corr': rev_corr, 'Q':loglike_et}
|
jbornschein/mca-genmodel
|
pulp/em/camodels/mmca_et.py
|
Python
|
agpl-3.0
| 16,713
|
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import datetime
# Django
from django.contrib.auth.models import User
from django.urls import reverse
# wger
from wger.core.tests.base_testcase import WgerTestCase
from wger.utils.helpers import (
make_token,
next_weekday
)
# TODO: parse the generated calendar files with the icalendar library
class IcalToolsTestCase(WgerTestCase):
"""
Tests some tools used for iCal generation
"""
def test_next_weekday(self):
"""
Test the next weekday function
"""
start_date = datetime.date(2013, 12, 5)
# Find next monday
self.assertEqual(next_weekday(start_date, 0), datetime.date(2013, 12, 9))
# Find next wednesday
self.assertEqual(next_weekday(start_date, 2), datetime.date(2013, 12, 11))
# Find next saturday
self.assertEqual(next_weekday(start_date, 5), datetime.date(2013, 12, 7))
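# Illustration only: a plausible reference implementation of next_weekday that
# is consistent with the three assertions above (the real implementation lives
# in wger.utils.helpers and may differ, e.g. in how it treats a start date that
# already falls on the requested weekday -- this sketch skips to the following
# week in that case).
def _next_weekday_reference(date_, weekday):
    days_ahead = weekday - date_.weekday()
    if days_ahead <= 0:  # requested day already passed (or is today) this week
        days_ahead += 7
    return date_ + datetime.timedelta(days=days_ahead)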
class WorkoutICalExportTestCase(WgerTestCase):
"""
Tests exporting the ical file for a workout
"""
def export_ical_token(self):
"""
Helper function that checks exporting an ical file using tokens for access
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:workout:ical', kwargs={'pk': 3,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/calendar')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Calendar-workout-3.ics')
# Approximate size
self.assertGreater(len(response.content), 540)
self.assertLess(len(response.content), 560)
def export_ical_token_wrong(self):
"""
Helper function that checks exporting an ical file using a wrong token
"""
uid = 'AB'
token = 'abc-11223344556677889900'
response = self.client.get(reverse('manager:workout:ical', kwargs={'pk': 3,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 403)
def export_ical(self, fail=False):
"""
Helper function
"""
response = self.client.get(reverse('manager:workout:ical', kwargs={'pk': 3}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/calendar')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Calendar-workout-3.ics')
# Approximate size
self.assertGreater(len(response.content), 540)
self.assertLess(len(response.content), 560)
def test_export_ical_anonymous(self):
"""
Tests exporting a workout as an ical file as an anonymous user
"""
self.export_ical(fail=True)
self.export_ical_token()
self.export_ical_token_wrong()
def test_export_ical_owner(self):
"""
Tests exporting a workout as an ical file as the owner user
"""
self.user_login('test')
self.export_ical(fail=False)
self.export_ical_token()
self.export_ical_token_wrong()
def test_export_ical_other(self):
"""
Tests exporting a workout as an ical file as a logged user not owning the data
"""
self.user_login('admin')
self.export_ical(fail=True)
self.export_ical_token()
self.export_ical_token_wrong()
class ScheduleICalExportTestCase(WgerTestCase):
"""
Tests exporting the ical file for a schedule
"""
def export_ical_token(self):
"""
Helper function that checks exporting an ical file using tokens for access
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:schedule:ical', kwargs={'pk': 2,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/calendar')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Calendar-schedule-2.ics')
# Approximate size
self.assertGreater(len(response.content), 1650)
self.assertLess(len(response.content), 1670)
def export_ical_token_wrong(self):
"""
Helper function that checks exporting an ical file using a wrong token
"""
uid = 'AB'
token = 'abc-11223344556677889900'
response = self.client.get(reverse('manager:schedule:ical', kwargs={'pk': 2,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 403)
def export_ical(self, fail=False):
"""
Helper function
"""
response = self.client.get(reverse('manager:schedule:ical', kwargs={'pk': 2}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/calendar')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Calendar-schedule-2.ics')
# Approximate size
self.assertGreater(len(response.content), 1650)
self.assertLess(len(response.content), 1670)
def test_export_ical_anonymous(self):
"""
Tests exporting a schedule as an ical file as an anonymous user
"""
self.export_ical(fail=True)
self.export_ical_token()
self.export_ical_token_wrong()
def test_export_ical_owner(self):
"""
Tests exporting a schedule as an ical file as the owner user
"""
self.user_login('admin')
self.export_ical(fail=False)
self.export_ical_token()
self.export_ical_token_wrong()
def test_export_ical_other(self):
"""
Tests exporting a schedule as an ical file as a logged user not owning the data
"""
self.user_login('test')
self.export_ical(fail=True)
self.export_ical_token()
self.export_ical_token_wrong()
|
rolandgeider/wger
|
wger/manager/tests/test_ical.py
|
Python
|
agpl-3.0
| 7,598
|
from __future__ import absolute_import
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
"""
Module for running scrapers
"""
import logging;log = logging.getLogger(__name__)
from collections import namedtuple
from amcat.models import Article, Project
ScrapeError = namedtuple("ScrapeError", ["i", "unit", "error"])
class Controller(object):
def __init__(self):
self.errors = []
self.articles = []
def run(self, scraper):
try:
units = list(scraper._get_units())
except Exception as e:
self.errors.append(ScrapeError(None,None,e))
log.exception("scraper._get_units failed")
return self.articles
for i, unit in enumerate(units):
try:
articles = list(scraper._scrape_unit(unit))
except Exception as e:
log.exception("scraper._scrape_unit failed")
self.errors.append(ScrapeError(i,unit,e))
continue
self.articles += articles
for article in self.articles:
_set_default(article, 'project', scraper.project)
try:
articles, errors = Article.create_articles(self.articles, scraper.articleset)
self.saved_article_ids = {getattr(a, "duplicate_of", a.id) for a in self.articles}
for e in errors:
self.errors.append(ScrapeError(None,None,e))
except Exception as e:
self.errors.append(ScrapeError(None,None,e))
log.exception("scraper._get_units failed")
return self.saved_article_ids
def _set_default(obj, attr, val):
try:
if getattr(obj, attr, None) is not None: return
except Project.DoesNotExist:
pass # django throws DNE on x.y if y is not set and not nullable
setattr(obj, attr, val)
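# Usage sketch (illustrative only, not part of amcat): any object exposing
# _get_units() and _scrape_unit(unit), plus project/articleset attributes, can
# be driven by Controller; exceptions raised by either hook are collected in
# controller.errors instead of aborting the run. DummyScraper and its fields
# are made-up assumptions.
#
# class DummyScraper(object):
#     project = None          # would normally be a Project instance
#     articleset = None       # would normally be an ArticleSet instance
#
#     def _get_units(self):
#         return ["http://example.com/1", "http://example.com/2"]
#
#     def _scrape_unit(self, unit):
#         yield Article(title=unit)   # assumed minimal Article construction
#
# controller = Controller()
# saved_ids = controller.run(DummyScraper())
# print(controller.errors)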
|
tschmorleiz/amcat
|
amcat/scripts/article_upload/controller.py
|
Python
|
agpl-3.0
| 3,148
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('autodidact', '0002_auto_20161004_1251'),
]
operations = [
migrations.CreateModel(
name='RightAnswer',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('value', models.CharField(help_text='This value can either be a case-insensitive string or a numeric value. For numeric values you can use the <a target="_blank" href="https://docs.moodle.org/23/en/GIFT_format">GIFT notation</a> of "answer:tolerance" or "low..high".', max_length=255)),
('step', models.ForeignKey(related_name='right_answers', to='autodidact.Step')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='WrongAnswer',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('value', models.CharField(help_text='Supplying one or more wrong answers will turn this into a multiple choice question.', max_length=255)),
('step', models.ForeignKey(related_name='wrong_answers', to='autodidact.Step')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterField(
model_name='course',
name='slug',
field=models.SlugField(unique=True),
preserve_default=True,
),
migrations.AlterField(
model_name='step',
name='answer_required',
field=models.BooleanField(default=False, help_text='If enabled, this step will show students an input field where they can enter their answer. Add one or more right answers below to have students’ answers checked for correctness.'),
preserve_default=True,
),
]
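# Illustrative sketch only (not part of autodidact): one way to check a numeric
# student answer against the GIFT-style notations mentioned in the help text
# above, i.e. "answer:tolerance" or "low..high". The function name and parsing
# details are assumptions, not the application's actual grading code.
def gift_numeric_match(value_spec, answer):
    answer = float(answer)
    if '..' in value_spec:                       # "low..high" range form
        low, high = (float(part) for part in value_spec.split('..', 1))
        return low <= answer <= high
    if ':' in value_spec:                        # "answer:tolerance" form
        target, tolerance = (float(part) for part in value_spec.split(':', 1))
        return abs(answer - target) <= tolerance
    return float(value_spec) == answer           # plain numeric value

# gift_numeric_match("3.14:0.01", "3.141") -> True
# gift_numeric_match("1..5", "4")          -> True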
|
JaapJoris/autodidact
|
autodidact/migrations/0003_auto_20170116_1142.py
|
Python
|
agpl-3.0
| 2,079
|
from gi.repository import Gtk
import os
class ExportDialog(Gtk.Dialog):
def __init__(self,parent,*args):
Gtk.Dialog.__init__(self, "Exportieren", parent, 0,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK))
self.set_default_size(150,150)
self.contentarea=self.get_content_area()
self.selection_type=""
self.selection_folder=""
self.combo_store=Gtk.ListStore(str)
self.combo_store.append(["CSV"])
self.combo_store.append(["XML"])
self.combo=Gtk.ComboBox.new_with_model_and_entry(self.combo_store)
self.combo.connect("changed",self.update_select_type)
self.combo.set_entry_text_column(0)
self.contentarea.add(self.combo)
self.filechooser = Gtk.FileChooserButton(Gtk.FileChooserAction.CREATE_FOLDER)
self.filechooser.set_create_folders(True)
self.filechooser.set_action(Gtk.FileChooserAction.SELECT_FOLDER)
self.filechooser.connect("file-set",self.update_select_folder)
self.contentarea.add(self.filechooser)
self.show_all()
def update_select_type(self,combo,*args):
treit=combo.get_active_iter()
if(treit == None):
return
self.selection_type=combo.get_model()[treit][0]
return
def update_select_folder(self,chooser,*args):
self.selection_folder=chooser.get_filename()
|
daknuett/BeeKeeper
|
pythons/objs_graphics.py
|
Python
|
agpl-3.0
| 1,259
|
"""
Name : Stegano Extract and Read File From Image
Created By : Agus Makmun (Summon Agus)
Blog : bloggersmart.net - python.web.id
License : GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007
Documentation : https://github.com/agusmakmun/Some-Examples-of-Simple-Python-Script/
"""
import os
import time, zipfile
class scureImage(object):
def _secure(self, image, zipfile, new_image):
return os.system("cat "+image+" "+zipfile+" > "+new_image)
def _openScure(self, new_image):
return os.system("unzip "+new_image)
def _stegano(self, zipFile):
archive = zipfile.ZipFile(zipFile, 'r')
list_name = archive.namelist()
print "[+] This list of files in the image."
print "+---------------------------------------+"
print " ", list_name
print "+---------------------------------------+"
file_open = raw_input("[+] Type file want to read.\n[+] >>> ")
try:
print "[+] This content of { "+file_open+" }"
print "+---------------------------------------+"
print archive.read(file_open)
print "+---------------------------------------+\n"
except KeyError:
print "[-] Uppss, {", file_open, "} is not found at this file."
print "[-] Please check again!"
def main(self):
print "\n\tWelcome to Python Scure Image { STEGANO METHOD }"
print "[+] Please choice this options:"
print " 1. Saved files in image."
print " 2. Extract files from image."
print " 3. Stegano read file from image.\n"
mome = scureImage()
choice = raw_input("[+] >>> ")
if choice == "1":
print os.listdir(".")
img = raw_input("[+] Type Image file that will save your archive.\n[+] >>> ")
zip = raw_input("[+] Type your Zip file: ")
new_img = raw_input("[+] Type New Image that will save your zip: ")
mome._secure(img, zip, new_img)
print os.listdir(".")
elif choice == "2":
print os.listdir(".")
new_img = raw_input("[+] Type Image that will going to Extract all files.\n[+] >>> ")
mome._openScure(new_img)
time.sleep(2)
print os.listdir(".")
elif choice == "3":
print os.listdir(".")
zipName = raw_input("[+] Type Image where your file was saved.\n[+] >>> ")
try:
mome._stegano(zipName)
except IOError:
print "[-] Uppss, {", zipName, "} is not image or not found at this directory."
print "[-] Please check again!"
if __name__ == "__main__":
mome = scureImage()
mome.main()
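# Illustrative alternative (an assumption, not part of the original script):
# the same "append a zip archive to an image" trick can be done without
# os.system by concatenating the raw bytes; zipfile can still open the result
# because it locates the archive directory from the end of the file.
def secure_image_pure_python(image_path, zip_path, out_path):
    with open(out_path, 'wb') as out:
        with open(image_path, 'rb') as image_file:
            out.write(image_file.read())
        with open(zip_path, 'rb') as zip_file:
            out.write(zip_file.read())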
|
agusmakmun/Some-Examples-of-Simple-Python-Script
|
Stegano-Extract-and-Read-File-From-Image/script.py
|
Python
|
agpl-3.0
| 2,974
|
#!/usr/bin/python
# -*- coding: cp1252 -*-
#
##################################################################################
#
# Copyright 2016 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
__author__ = "Yaiza Rubio and Félix Brezo <contacto@i3visio.com>"
__version__ = "1.1"
import argparse
import json
import re
import sys
import urllib2
import osrframework.utils.browser as browser
from osrframework.utils.platforms import Platform
class Streakgaming(Platform):
"""
A <Platform> object for Streakgaming.
"""
def __init__(self):
"""
Constructor...
"""
self.platformName = "Streakgaming"
self.tags = ["social", "news", "gaming"]
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "http://www.streakgaming.com/forum/members/" + "<usufy>" + ".html"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
# Strings that will imply that the query number is not appearing
self.validQuery = {}
# The regular expression '.+' will match any query.
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
# Strings that will imply that the query number is not appearing
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = ["<title>Streak Gaming Online Gambling Forum</title>"]
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
#self.fieldsRegExp["usufy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
# This attribute will be feeded when running the program.
self.foundFields = {}
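# Illustrative note (an assumption about how OSRFramework drives this wrapper):
# in usufy mode the queried nickname is substituted into the template above,
# e.g. self.url["usufy"].replace("<usufy>", "john_doe") ->
# "http://www.streakgaming.com/forum/members/john_doe.html", and a candidate
# profile is discarded if any string listed in self.notFoundText["usufy"]
# appears in the downloaded page.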
|
i3visio/osrframework
|
osrframework/wrappers/pending/streakgaming.py
|
Python
|
agpl-3.0
| 4,315
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2017 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from tests import factories as f
from tests.utils import disconnect_signals, reconnect_signals
import pytest
pytestmark = pytest.mark.django_db
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
def test_auth_create(client):
url = reverse('auth-list')
user = f.UserFactory.create()
login_data = json.dumps({
"type": "normal",
"username": user.username,
"password": user.username,
})
result = client.post(url, login_data, content_type="application/json")
assert result.status_code == 200
def test_auth_action_register(client, settings):
settings.PUBLIC_REGISTER_ENABLED = True
url = reverse('auth-register')
register_data = json.dumps({
"type": "public",
"username": "test",
"password": "test",
"full_name": "test",
"email": "test@test.com",
})
result = client.post(url, register_data, content_type="application/json")
assert result.status_code == 201
|
dayatz/taiga-back
|
tests/integration/resources_permissions/test_auth_resources.py
|
Python
|
agpl-3.0
| 2,107
|
# -*- coding: utf-8 -*-
__version__ = '1.1.0'
default_app_config = 'ipware.apps.AppConfig'
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/ipware/__init__.py
|
Python
|
agpl-3.0
| 93
|
# -*- coding: utf-8 -*-
# Copyright 2011 Domsense srl (<http://www.domsense.com>)
# Copyright 2011-15 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright 2017 OpenSynergy Indonesia (<https://opensynergy-indonesia.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from __future__ import division
import math
import time
from datetime import datetime, timedelta
# from openerp.tools import float_compare
import pytz
from openerp import api, fields, models
from openerp.exceptions import Warning as UserError
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class HrAttendance(models.Model):
# ref: https://bugs.launchpad.net/openobject-client/+bug/887612
# test: 0.9853 - 0.0085
_inherit = "hr.attendance"
def float_time_convert(self, float_val):
hours = math.floor(abs(float_val))
mins = abs(float_val) - hours
mins = round(mins * 60)
# Original Code
# Comment by Reason:
# 1. Mins can't be greater than 60
# ====================================
# if mins >= 60.0:
# hours = hours + 1
# mins = 0.0
float_time = "%02d:%02d" % (hours, mins)
return float_time
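# Example values (illustrative): float_time_convert(8.5) -> "08:30" and
# float_time_convert(-1.25) -> "01:15" (the sign is dropped, only the magnitude
# is formatted). Note that values just below a full hour, e.g. 7.999, round to
# "07:60" because the 60-minute carry above is commented out.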
def float_to_datetime(self, float_val):
str_float = self.float_time_convert(float_val)
hours = int(str_float.split(":")[0])
minutes = int(str_float.split(":")[1])
days = 1
if hours >= 24:
days += hours // 24
hours = hours % 24
return datetime(1900, 1, int(days), hours, minutes)
# Original Code
# Comment by Reason:
# 1. Not used
# ==================================================
# def float_to_timedelta(self, float_val):
# str_time = self.float_time_convert(float_val)
# int_hour = int(str_time.split(":")[0])
# int_minute = int(str_time.split(":")[1])
# return timedelta(
# 0,
# (int_hour * 3600.0) + (int_minute * 6.0)),
def total_seconds(self, td):
return (
td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6
) / 10 ** 6
def time_difference(self, float_start_time, float_end_time, help_message=False):
# Original Code
# Condition:
# 1. End Time = Duration within working schedule
# 2. Start Time = Duration
# Comment by Reason:
# 1. Start Time can't be greater than end time
# ================================================================
# if float_compare(
# float_end_time, float_start_time, precision_rounding=0.0000001
# ) == -1:
# that means a difference smaller than 0.36 milliseconds
# message = _('End time %s < start time %s %s') % (
# unicode(float_end_time),
# unicode(float_start_time),
# help_message and '(' + help_message + ')' or ''
# )
# raise UserError(message)
delta = self.float_to_datetime(float_end_time) - self.float_to_datetime(
float_start_time
)
return self.total_seconds(delta) / 3600.0
def time_sum(self, float_first_time, float_second_time):
str_first_time = self.float_time_convert(float_first_time)
first_timedelta = timedelta(
0,
int(str_first_time.split(":")[0]) * 3600.0
+ int(str_first_time.split(":")[1]) * 60.0,
)
str_second_time = self.float_time_convert(float_second_time)
second_timedelta = timedelta(
0,
int(str_second_time.split(":")[0]) * 3600.0
+ int(str_second_time.split(":")[1]) * 60.0,
)
return self.total_seconds(first_timedelta + second_timedelta) / 60.0 / 60.0
def split_interval_time_by_precision(
self, start_datetime, duration, precision=0.25
):
# start_datetime: datetime, duration: hours, precision: hours
# returns [(datetime, hours)]
res = []
while duration > precision:
res.append((start_datetime, precision))
start_datetime += timedelta(hours=precision)
duration -= precision
if duration > precision / 2.0:
res.append((start_datetime, precision))
return res
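# Example (illustrative): 1.6 h starting at 08:00 with precision 0.25 h yields
# six (start, 0.25) slots at 08:00, 08:15, ..., 09:15; the remaining 0.1 h is
# dropped because it is below half a slot, while a 0.15 h remainder would be
# rounded up to one extra full slot.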
def datetime_to_hour(self, datetime_):
hour = datetime_.hour + datetime_.minute / 60.0 + datetime_.second / 3600.0
return hour
def mid_time_interval(self, datetime_start, delta):
return datetime_start + timedelta(hours=delta / 2.0)
@api.model
def matched_schedule(self, datetime_, weekday_char, calendar_id, context=None):
calendar_attendance_pool = self.env["resource.calendar.attendance"]
datetime_hour = self.datetime_to_hour(datetime_)
matched_schedules = calendar_attendance_pool.search(
[
"&",
"|",
("date_from", "=", False),
("date_from", "<=", datetime_.date()),
"|",
("dayofweek", "=", False),
("dayofweek", "=", weekday_char),
("calendar_id", "=", calendar_id),
("hour_to", ">=", datetime_hour),
("hour_from", "<=", datetime_hour),
],
)
return matched_schedules
# Original Code
# Comment by Reason:
# 1. Not used
# ====================================
# @api.model
# def get_reference_calendar(
# self, employee_id, date=None):
#
# if date is None:
# date = fields.date.context_today()
#
# contract_pool = self.env['hr.contract']
# employee_pool = self.env['hr.employee']
#
# active_contracts = contract_pool.search([
# '&',
# ('employee_id', '=', employee_id),
# '|',
# '&',
# ('date_start', '<=', date),
# '|',
# ('date_end', '>=', date),
# ('date_end', '=', False),
# '&',
# '&',
# ('trial_date_start', '!=', False),
# ('trial_date_start', '<=', date),
# '&',
# ('trial_date_end', '!=', False),
# ('trial_date_end', '>=', date),
# ])
#
# if len(active_contracts) > 1:
# employee = employee_pool.browse(employee_id)
# msg = _('Too many active contracts for employee %s at date %s')
# raise UserError(msg % (employee.name, date))
# elif active_contracts:
# contract = active_contracts[0]
# return contract.working_hours
# else:
# return None
def _ceil_rounding(self, rounding, datetime_):
minutes = datetime_.minute / 60.0 + datetime_.second / 3600.0
return math.ceil(minutes * rounding) / rounding
def _floor_rounding(self, rounding, datetime_):
minutes = datetime_.minute / 60.0 + datetime_.second / 3600.0
return math.floor(minutes * rounding) / rounding
# TODO: this is for functional field
@api.depends(
"triggering_attendance_id",
"triggering_attendance_id.name",
"triggering_attendance_id.action",
"triggering_attendance_id.employee_id",
"employee_id.contract_ids",
"employee_id.contract_ids.date_start",
"employee_id.contract_ids.date_start",
"employee_id.contract_ids.date_end",
"employee_id.contract_ids.trial_date_start",
"employee_id.contract_ids.trial_date_end",
"employee_id.contract_ids.working_hours",
"employee_id.contract_ids.working_hours.attendance_ids",
"employee_id.contract_ids.working_hours.attendance_ids.dayofweek",
"employee_id.contract_ids.working_hours.attendance_ids.date_from",
"employee_id.contract_ids.working_hours.attendance_ids.hour_from",
"employee_id.contract_ids.working_hours.attendance_ids.hour_to",
"employee_id.contract_ids.working_hours.attendance_ids.calendar_id",
)
@api.multi
def _compute_attendance_duration(self): # noqa C901
precision = (
self.env["res.users"]
.browse(self.env.user.id)
.company_id.working_time_precision
)
# 2012.10.16 LF FIX : Get timezone from context
active_tz = pytz.timezone(self.env.context.get("tz") or "UTC")
str_now = datetime.strftime(datetime.now(), DEFAULT_SERVER_DATETIME_FORMAT)
for attendance in self:
duration = 0.0
# 2012.10.16 LF FIX : Attendance in context timezone
attendance_start = (
datetime.strptime(attendance.name, DEFAULT_SERVER_DATETIME_FORMAT)
.replace(tzinfo=pytz.utc)
.astimezone(active_tz)
)
next_attendance_date = str_now
next_attendance = False
# should we compute for sign out too?
if attendance.action == "sign_in":
next_attendances = self.search(
[
("employee_id", "=", attendance.employee_id.id),
("name", ">", attendance.name),
],
order="name",
)
if next_attendances:
next_attendance = next_attendances[0]
# Original Code
# Comment by Reason:
# 1. hr.attendance already has constraints againts it
# ======================================================
# if next_attendance.action == 'sign_in':
# 2012.10.16 LF FIX : Attendance in context timezone
# raise UserError(
# _('Incongruent data: sign-in %s is followed by '
# 'another sign-in') % attendance_start)
next_attendance_date = next_attendance.name
# 2012.10.16 LF FIX : Attendance in context timezone
attendance_stop = (
datetime.strptime(
next_attendance_date, DEFAULT_SERVER_DATETIME_FORMAT
)
.replace(tzinfo=pytz.utc)
.astimezone(active_tz)
)
duration_delta = attendance_stop - attendance_start
duration = self.total_seconds(duration_delta) / 3600.0
duration = round(duration / precision) * precision
attendance.duration = duration
attendance.end_datetime = next_attendance_date
# If calendar is not specified: working days = 24/7
attendance.inside_calendar_duration = duration
attendance.outside_calendar_duration = 0.0
reference_calendar = (
attendance.employee_id.contract_id
and attendance.employee_id.contract_id.working_hours
or False
)
# reference_calendar = self.get_reference_calendar(
# attendance.employee_id.id,
# date=str_now[:10])
if reference_calendar and next_attendance:
# raise UserError("weks")
if reference_calendar:
# TODO: apply rounding or tolerance first?
if reference_calendar.attendance_rounding:
float_attendance_rounding = float(
reference_calendar.attendance_rounding
)
rounded_start_hour = self._ceil_rounding(
float_attendance_rounding, attendance_start
)
rounded_stop_hour = self._floor_rounding(
float_attendance_rounding, attendance_stop
)
# if shift is approximately one hour
if abs(1 - rounded_start_hour) < 0.01:
attendance_start = datetime(
attendance_start.year,
attendance_start.month,
attendance_start.day,
attendance_start.hour + 1,
)
else:
attendance_start = datetime(
attendance_start.year,
attendance_start.month,
attendance_start.day,
attendance_start.hour,
int(round(rounded_start_hour * 60.0)),
)
attendance_stop = datetime(
attendance_stop.year,
attendance_stop.month,
attendance_stop.day,
attendance_stop.hour,
int(round(rounded_stop_hour * 60.0)),
)
# again
duration_delta = attendance_stop - attendance_start
duration = self.total_seconds(duration_delta) / 3600.0
duration = round(duration / precision) * precision
attendance.duration = duration
attendance.inside_calendar_duration = 0.0
attendance.outside_calendar_duration = 0.0
calendar_id = reference_calendar.id
intervals_within = 0
# split attendance in intervals = precision
# 2012.10.16 LF FIX : no recursion in split attendance
splitted_attendances = self.split_interval_time_by_precision(
attendance_start, duration, precision
)
counter = 0
for atomic_attendance in splitted_attendances:
counter += 1
centered_attendance = self.mid_time_interval(
atomic_attendance[0],
delta=atomic_attendance[1],
)
# check if centered_attendance is within a working
# schedule
# 2012.10.16 LF FIX : weekday must be single character
# not int
weekday_char = unicode( # noqa: F821
unichr(centered_attendance.weekday() + 48) # noqa: F821
)
matched_schedules = self.matched_schedule(
centered_attendance,
weekday_char,
calendar_id,
)
if len(matched_schedules) > 1:
raise UserError(
_("Wrongly configured working schedule with " "id %s")
% unicode(calendar_id) # noqa: F821
)
if matched_schedules:
intervals_within += 1
# sign in tolerance
if intervals_within == 1:
att = matched_schedules[0]
att_start = self.datetime_to_hour(attendance_start)
if att.hour_from and att_start >= att.hour_from and (
att_start - att.hour_from - att.tolerance_to
) < 0.01:
# handling float roundings (<=)
additional_intervals = round(
(att_start - att.hour_from) / precision
)
intervals_within += additional_intervals
attendance.duration = self.time_sum(
attendance.duration,
additional_intervals * precision,
)
# sign out tolerance
if len(splitted_attendances) == counter:
att = matched_schedules[0]
att_stop = self.datetime_to_hour(attendance_stop)
if att_stop <= att.hour_to and (
att_stop - att.hour_to + att.tolerance_from
) > (-0.01):
# handling float roundings (>=)
additional_intervals = round(
(att.hour_to - att_stop) / precision
)
intervals_within += additional_intervals
attendance.duration = self.time_sum(
attendance.duration,
additional_intervals * precision,
)
attendance.inside_calendar_duration = intervals_within * precision
# make difference using time in order to avoid
# rounding errors
# inside_calendar_duration can't be > duration
attendance.outside_calendar_duration = self.time_difference(
attendance.inside_calendar_duration,
attendance.duration,
help_message="Attendance ID %s" % attendance.id,
)
if reference_calendar.overtime_rounding:
if attendance.outside_calendar_duration:
overtime = attendance.outside_calendar_duration
cal = reference_calendar
if cal.overtime_rounding_tolerance:
overtime = self.time_sum(
overtime, cal.overtime_rounding_tolerance
)
float_overtime_rounding = float(
reference_calendar.overtime_rounding
)
attendance.outside_calendar_duration = (
math.floor(overtime * float_overtime_rounding)
/ float_overtime_rounding
)
@api.depends("name", "action", "employee_id")
@api.multi
def _compute_triggering_attendance_id(self):
for attendance in self:
attendance.triggering_attendance_id = False
if attendance.action == "sign_in":
attendance.triggering_attendance_id = attendance.id
elif attendance.action == "sign_out":
previous_attendances = self.search(
[
("employee_id", "=", attendance.employee_id.id),
("name", "<", attendance.name),
("action", "=", "sign_in"),
],
order="name",
)
if previous_attendances:
attendance.triggering_attendance_id = previous_attendances[-1].id
@api.depends("name")
@api.multi
def _compute_day(self):
for attendance in self:
attendance.day = time.strftime(
"%Y-%m-%d", time.strptime(attendance.name, "%Y-%m-%d %H:%M:%S")
)
triggering_attendance_id = fields.Many2one(
string="Triggering Attendance",
comodel_name="hr.attendance",
compute="_compute_triggering_attendance_id",
store=True,
)
duration = fields.Float(
compute="_compute_attendance_duration",
multi="duration",
string="Attendance duration",
store=True,
)
end_datetime = fields.Datetime(
compute="_compute_attendance_duration",
multi="duration",
string="End date time",
store=True,
)
outside_calendar_duration = fields.Float(
compute="_compute_attendance_duration",
multi="duration",
string="Overtime",
store=True,
)
inside_calendar_duration = fields.Float(
compute="_compute_attendance_duration",
multi="duration",
string="Duration within working schedule",
store=True,
)
day = fields.Date(
compute="_compute_day",
string="Day",
store=True,
select=1,
)
@api.multi
def button_dummy(self):
for att in self:
# By writing the 'action' field without changing it,
# I'm forcing the '_compute_attendance_duration' to be executed
att.write({"action": att.action})
|
open-synergy/opnsynid-hr
|
hr_attendance_computation/models/hr_attendance.py
|
Python
|
agpl-3.0
| 21,338
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-06 16:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Logs',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('message', models.CharField(max_length=200)),
('date', models.DateField(auto_now=True)),
],
),
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('value', models.IntegerField()),
('last_consomation', models.DateField()),
('user_Acount_inside_club_Nix', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('description', models.CharField(max_length=200)),
('price', models.IntegerField()),
('hide', models.BooleanField(default=False)),
('image', models.ImageField(upload_to='static')),
],
),
migrations.AddField(
model_name='logs',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snackix.Product'),
),
migrations.AddField(
model_name='logs',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snackix.Member'),
),
]
|
tiregram/Boufix
|
snackix/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 2,164
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from udata.api import api, fields, base_reference
from udata.core.badges.api import badge_fields
from udata.core.dataset.api_fields import dataset_ref_fields
from udata.core.organization.api_fields import org_ref_fields
from udata.core.user.api_fields import user_ref_fields
from .models import REUSE_TYPES, IMAGE_SIZES
BIGGEST_IMAGE_SIZE = IMAGE_SIZES[0]
reuse_fields = api.model('Reuse', {
'id': fields.String(description='The reuse identifier', readonly=True),
'title': fields.String(description='The reuse title', required=True),
'slug': fields.String(
description='The reuse permalink string', readonly=True),
'type': fields.String(
description='The reuse type', required=True, enum=REUSE_TYPES.keys()),
'url': fields.String(
description='The reuse remote URL (website)', required=True),
'description': fields.Markdown(
description='The reuse description in Markdown', required=True),
'tags': fields.List(
fields.String, description='Some keywords to help in search'),
'badges': fields.List(fields.Nested(badge_fields),
description='The reuse badges',
readonly=True),
'featured': fields.Boolean(
description='Is the reuse featured', readonly=True),
'private': fields.Boolean(
description='Is the reuse private to the owner or the organization'),
'image': fields.ImageField(description='The reuse thumbnail (cropped) URL'),
'image_thumbnail': fields.ImageField(attribute='image', size=BIGGEST_IMAGE_SIZE,
description='The reuse thumbnail URL. This is the square '
'({0}x{0}) and cropped version.'.format(BIGGEST_IMAGE_SIZE)),
'created_at': fields.ISODateTime(
description='The reuse creation date', readonly=True),
'last_modified': fields.ISODateTime(
description='The reuse last modification date', readonly=True),
'deleted': fields.ISODateTime(
description='The organization identifier', readonly=True),
'datasets': fields.List(
fields.Nested(dataset_ref_fields), description='The reused datasets'),
'organization': fields.Nested(
org_ref_fields, allow_null=True,
description='The publishing organization', readonly=True),
'owner': fields.Nested(
user_ref_fields, description='The owner user', readonly=True,
allow_null=True),
'metrics': fields.Raw(description='The reuse metrics', readonly=True),
'uri': fields.UrlFor(
'api.reuse', lambda o: {'reuse': o},
description='The reuse API URI', readonly=True),
'page': fields.UrlFor(
'reuses.show', lambda o: {'reuse': o},
description='The reuse page URL', readonly=True),
})
reuse_page_fields = api.model('ReusePage', fields.pager(reuse_fields))
reuse_suggestion_fields = api.model('ReuseSuggestion', {
'id': fields.String(description='The reuse identifier', readonly=True),
'title': fields.String(description='The reuse title', readonly=True),
'slug': fields.String(
description='The reuse permalink string', readonly=True),
'image_url': fields.String(description='The reuse thumbnail URL'),
'page': fields.UrlFor(
'reuses.show_redirect', lambda o: {'reuse': o['slug']},
description='The reuse page URL', readonly=True),
'score': fields.Float(
description='The internal match score', readonly=True),
})
reuse_ref_fields = api.inherit('ReuseReference', base_reference, {
'title': fields.String(description='The reuse title', readonly=True),
'image': fields.ImageField(description='The reuse thumbnail (cropped) URL'),
'image_thumbnail': fields.ImageField(attribute='image', size=BIGGEST_IMAGE_SIZE,
description='The reuse thumbnail URL. This is the square '
'({0}x{0}) and cropped version.'.format(BIGGEST_IMAGE_SIZE)),
'uri': fields.UrlFor(
'api.reuse', lambda o: {'reuse': o},
description='The reuse API URI', readonly=True),
'page': fields.UrlFor(
'reuses.show', lambda o: {'reuse': o},
description='The reuse page URL', readonly=True),
})
reuse_type_fields = api.model('ReuseType', {
'id': fields.String(description='The reuse type identifier'),
'label': fields.String(description='The reuse type display name')
})
|
davidbgk/udata
|
udata/core/reuse/api_fields.py
|
Python
|
agpl-3.0
| 4,422
|
"""
Helper for keeping processes singletons
"""
__license__ = """
This file is part of RAPD
Copyright (C) 2016-2018 Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2016-03-02"
__maintainer__ = "Frank Murphy"
__email__ = "fmurphy@anl.gov"
__status__ = "Development"
# Standard imports
import fcntl
import os
def lock_file(file_path):
"""
Method to make sure only one instance is running on this machine.
If file_path is False, no locking will occur
If file_path is not False and is already locked, True is returned
If file_path is not False and can be locked, False is returned
Keyword arguments
file_path -- potential file for maintaining lock
"""
# If file_path is a path, try to lock
if file_path:
# Create the directory for file_path if it does not exist
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
global _file_handle
_file_handle = open(file_path, "w")
try:
fcntl.lockf(_file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
return False
except IOError:
#raise Exception("%s is already locked, unable to run" % file_path)
return True
# If file_path is False, always return False
else:
return True
def close_lock_file():
"""Close the _file_handle handle."""
_file_handle.close()
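# Usage sketch (illustrative; the lock file path is an assumption):
#
# if lock_file("/tmp/rapd_example.lock"):
#     raise SystemExit("Another instance appears to be running")
# try:
#     pass  # do the work that must only run once per machine
# finally:
#     close_lock_file()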
|
RAPD/RAPD
|
src/utils/lock.py
|
Python
|
agpl-3.0
| 1,987
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import os
from django.core.exceptions import ImproperlyConfigured
from shoop.utils.setup import Setup
from . import base_settings
def configure(setup):
base_settings.configure(setup)
local_settings_file = os.getenv('LOCAL_SETTINGS_FILE')
# Backward compatibility: Find from current directory, if
# LOCAL_SETTINGS_FILE environment variable is unset
if local_settings_file is None:
cand = os.path.join(os.path.dirname(__file__), 'local_settings.py')
if os.path.exists(cand):
local_settings_file = cand
# Load local settings from file
if local_settings_file:
local_settings_ns = {
'__file__': local_settings_file,
}
with open(local_settings_file, 'rb') as fp:
compiled = compile(fp.read(), local_settings_file, 'exec')
exec(compiled, local_settings_ns)
if 'configure' not in local_settings_ns:
raise ImproperlyConfigured('No configure in local_settings')
local_configure = local_settings_ns['configure']
local_configure(setup)
return setup
globals().update(Setup.configure(configure))
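# Illustrative example (an assumption, not shipped with shoop): a minimal
# local_settings.py compatible with the loader above only needs to define a
# configure(setup) callable that mutates the Setup object, e.g.:
#
# def configure(setup):
#     setup.DEBUG = True
#     setup.ALLOWED_HOSTS = ["localhost"]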
|
akx/shoop
|
shoop_workbench/settings/__init__.py
|
Python
|
agpl-3.0
| 1,393
|
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import test_accounting
from . import test_donation
|
mozaik-association/mozaik
|
mozaik_account/tests/__init__.py
|
Python
|
agpl-3.0
| 153
|
# -*- coding: utf8 -*-
#
# Copyright (C) 2014 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import test_purchase_group_by_period
|
odoousers2014/odoo-addons-supplier_price
|
purchase_group_by_period/tests/__init__.py
|
Python
|
agpl-3.0
| 821
|
#!/usr/bin/env python
#
# Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002 Bryce "Zooko" Wilcox-O'Hearn
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
#
__cvsid = '$Id: mencode_unittests.py,v 1.1 2002/06/25 03:54:57 zooko Exp $'
# Python standard library modules
import operator
import random
import traceback
try:
import unittest
except:
class unittest:
class TestCase:
pass
pass
pass
# pyutil modules
import humanreadable
import memutil
# Mnet modules
from mencode import *
class Testy(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_decode_random_illformed_junk(self):
try:
mdecode(string.join(filter(lambda x: x != ':', map(chr, map(random.randrange, [0]*20, [256]*20))), ''))
raise "This shouldn't have decoded without an exception."
except MencodeError:
# Good. That was definitely ill-formed.
pass
def test_decode_other_random_illformed_junk(self):
l = random.randrange(0, 200)
s = str(l) + ':' + "x" * (l-1) # too short. Heh heh.
try:
mdecode(s)
raise "This shouldn't have decoded without an exception."
except MencodeError:
# Good. That was definitely ill-formed.
pass
def test_decode_unknown(self):
try:
decode_unknown('(())', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
try:
decode_unknown('((111))', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
assert decode_unknown('((0:))', 0) == (UNKNOWN_TYPE, 5)
assert decode_unknown(')', 0) == (UNKNOWN_TYPE, 0)
assert decode_unknown('1:a2:ab)', 0) == (UNKNOWN_TYPE, 7)
def test_encode_and_decode_string_with_nulls(self):
strwn = "\000\001\000"
assert mdecode(mencode(strwn)) == strwn
def test_encode_and_decode_none(self):
assert mdecode(mencode(None)) == None
def test_encode_and_decode_long(self):
assert mdecode(mencode(-23452422452342L)) == -23452422452342L
def test_encode_and_decode_int(self):
assert mdecode(mencode(2)) == 2
def test_dict_enforces_order(self):
mdecode('(4:dict(3:int1:0)(4:null)(3:int1:1)(4:null))')
try:
mdecode('(4:dict(3:int1:1)(4:null)(3:int1:0)(4:null))')
except MencodeError:
pass
def test_dict_forbids_key_repeat(self):
try:
mdecode('(4:dict(3:int1:1)(4:null)(3:int1:1)(4:null))')
except MencodeError:
pass
def test_decode_unknown_type_not_in_dict(self):
try:
mdecode('(7:garbage)')
return False
except UnknownTypeError:
pass
def test_decode_unknown_type_in_dict(self):
# I strongly disagree with this feature. It violates canonicity (which, as we all know, open up security holes), as well as being potentially confusing to debuggers and to mencode maintainers, and it is currently not needed. --Zooko 2001-06-03
assert mdecode('(4:dict(7:garbage)(3:int1:4)(4:null)(3:int1:5))') == {None: 5}
assert mdecode('(4:dict(4:null)(3:int1:5)(3:int1:4)(7:garbage))') == {None: 5}
def test_MencodeError_in_decode_unknown(self):
try:
mdecode('(4:dict(7:garbage)(2:int1:4)(4:null)(3:int1:5))')
return 0
except MencodeError:
pass
def test_decode_raw_string(self):
assert decode_raw_string('1:a', 0) == ('a', 3)
assert decode_raw_string('0:', 0) == ('', 2)
assert decode_raw_string('10:aaaaaaaaaaaaaaaaaaaaaaaaa', 0) == ('aaaaaaaaaa', 13)
assert decode_raw_string('10:', 1) == ('', 3)
try:
decode_raw_string('11:', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
try:
decode_raw_string('01:a', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
try:
decode_raw_string('11', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
try:
decode_raw_string('h', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
try:
decode_raw_string('h:', 0)
return 0
except IndexError:
pass
except ValueError:
pass
except MencodeError:
pass
def test_decode_noncanonical_int(self):
try:
mdecode('(3:int2:03)')
assert false, "non canonical integer allowed '03'"
except MencodeError:
pass
try:
mdecode('(3:int2:3 )')
assert false, "non canonical integer allowed '3 '"
except MencodeError:
pass
try:
mdecode('(3:int2: 3)')
assert false, "non canonical integer allowed ' 3'"
except MencodeError:
pass
try:
mdecode('(3:int2:-0)')
assert false, "non canonical integer allowed '-0'"
except MencodeError:
pass
def test_encode_and_decode_hash_key(self):
x = {42: 3}
y = {'42': 3}
assert mdecode(mencode(x)) == x
assert mdecode(mencode(y)) == y
def test_encode_and_decode_list(self):
assert mdecode(mencode([])) == []
def test_encode_and_decode_tuple(self):
assert mdecode(mencode(())) == []
def test_encode_and_decode_dict(self):
assert mdecode(mencode({})) == {}
def test_encode_and_decode_complex_object(self):
spam = [[], 0, -3, -345234523543245234523L, {}, 'spam', None, {'a': 3}, {69: []}]
assert mencode(mdecode(mencode(spam))) == mencode(spam)
assert mdecode(mencode(spam)) == spam
def test_preencoded_thing(self):
thing = {"dirty limmerk": ["there once was a man from peru", "who set out to sail a canoe"]}
pthing = PreEncodedThing(thing)
assert len(mencode(thing)) == len(pthing)
assert mencode(pthing) == mencode(thing)
assert mdecode(mencode(thing)) == mdecode(mencode(pthing))
def test_dict_as_key(self):
try:
mdecode('(4:dict(4:dict)(4:null))')
assert false, "dict cannot be a key but it was allowed by mdecode"
except MencodeError:
return
def test_rej_dict_with_float(self):
try:
s = mencode({'foo': 0.9873})
assert 0, "You can't encode floats! Anyway, the result: %s, is probably not what we meant." % humanreadable.hr(s)
except MencodeError, le:
try:
# print "got exce1: %s" % humanreadable.hr(le)
s2 = mencode({'foo': 0.9873})
assert 0, "You can't encode floats! Anyway, the result: %s, is probably not what we meant." % humanreadable.hr(s2)
except MencodeError, le:
# print "got exce2: %s" % humanreadable.hr(le)
# Good! we want an exception when we try this.
return
def test_rej_float(self):
try:
s = mencode(0.9873)
assert 0, "You can't encode floats! Anyway, the result: %s, is probably not what we meant." % humanreadable.hr(s)
except MencodeError, le:
try:
s2 = mencode(0.9873)
assert 0, "You can't encode floats! Anyway, the result: %s, is probably not what we meant." % humanreadable.hr(s2)
except MencodeError, le:
# Good! we want an exception when we try this.
return
def test_no_leakage(self):
# Test every (other) test here for leakage! That's my cheap way to try to exercise the weird internal cases in the compiled code...
for m in dir(self.__class__):
if m[:len("test_")] == "test_":
if m != "test_no_leakage":
# print "testing for memory leak: %s" % m
self._help_test_no_leakage(getattr(self, m))
def _help_test_no_leakage(self, f):
slope = memutil.measure_mem_leakage(f, 2**7, iterspersample=2**4)
# print "slope: ", slope
if slope > 0.0001:
raise "%s leaks memory at a rate of approximately %s Python objects per invocation" % (f, slope,)
def _bench_it_mencode(n):
"""
For use with utilscripts/benchfunc.py.
"""
d = {}
for i in xrange(n):
d[i] = { i: 'spam', i + 1: 'eggs', i * 2: 'bacon'}
mencode(d)
def _bench_it_mencode_plus_mdecode(n):
"""
For use with utilscripts/benchfunc.py.
"""
d = {}
for i in xrange(n):
d[i] = { i: 'spam', i + 1: 'eggs', i * 2: 'bacon'*n}
mdecode(mencode(d))
def _profile_test_mdecode_implementation_speed():
import mojoutil
profit = mojoutil._dont_enable_if_you_want_speed_profit
profit(_real_test_mdecode_implementation_speed)
def _real_test_mdecode_implementation_speed():
import os
import time
msgpath = os.path.join(os.environ.get('HOME'), 'tmp/messages')
filenamelist = os.listdir(msgpath)
filenamelist.sort()
encoded_messages = []
sizes_list = []
for name in filenamelist:
encoded_messages.append( open(os.path.join(msgpath, name), 'rb').read() )
sizes_list.append( len(encoded_messages[-1]) )
totalbytes = reduce(lambda a,b: a+b, sizes_list)
average = totalbytes / len(sizes_list)
sizes_list.sort()
median = sizes_list[len(sizes_list)/2]
print 'read in %d messages totaling %d bytes, averaging %d bytes, median size of %d' % (len(sizes_list), totalbytes, average, median)
### 100% python speed test
print 'decoding using python implementation...'
# setup
decodersdict['string'] = decode_raw_string
# end setup
t1 = time.time()
for m in encoded_messages:
try:
mdecode(m)
except:
print '!',
t2 = time.time()
print 'done. total decoding time: %3.3f' % (t2 - t1,)
### partial C speed test
print 'decoding using partial C implementation...'
# setup
decode_raw_string = _c_mencode_help._c_decode_raw_string
decodersdict['string'] = decode_raw_string
# end setup
t1 = time.time()
for m in encoded_messages:
try:
mdecode(m)
except:
print '!',
t2 = time.time()
print 'done. total decoding time: %3.3f' % (t2 - t1,)
def _profile_test_mencode_implementation_speed():
import mojoutil
profit = mojoutil._dont_enable_if_you_want_speed_profit
profit(_real_test_mencode_implementation_speed)
def _real_test_mencode_implementation_speed():
import os
import time
msgpath = os.path.join(os.environ.get('HOME'), 'tmp/messages')
filenamelist = os.listdir(msgpath)
filenamelist.sort()
decoded_messages = []
sizes_list = []
for name in filenamelist:
encoding = open(os.path.join(msgpath, name), 'rb').read()
sizes_list.append( len(encoding) )
decoded_messages.append( mdecode(encoding) )
totalbytes = reduce(lambda a,b: a+b, sizes_list)
average = totalbytes / len(sizes_list)
sizes_list.sort()
median = sizes_list[len(sizes_list)/2]
print 'read and decoded %d messages totaling %d bytes, averaging %d bytes, median size of %d' % (len(sizes_list), totalbytes, average, median)
### 100% python speed test
print 'encoding using python implementation...'
# setup
# TODO none needed yet
# end setup
t1 = time.time()
for m in decoded_messages:
try:
mencode(m)
except:
print '!',
t2 = time.time()
print 'done. total encoding time: %3.3f' % (t2 - t1,)
def _real_test_encode_string_implementation_speed():
import os, time
ntests = 500
mlog = os.path.join(os.environ.get('MNETDIR'), 'common', 'mencode.py')
lines = open(mlog, 'r').readlines()
del(mlog)
o = StringIO()
t1 = time.time()
for i in xrange(ntests):
for line in lines:
encode_string(line, o)
o.seek(0)
t2 = time.time()
print 'done testing python impl of encode_string. total encoding time: %3.3f' % (t2 - t1,)
_c_encode_string = _c_mencode_help._c_encode_string
o = StringIO()
t1 = time.time()
for i in xrange(ntests):
for line in lines:
_c_encode_string(line, o)
o.seek(0)
t2 = time.time()
print 'done testing C impl of encode_string. total encoding time: %3.3f' % (t2 - t1,)
if __name__ == '__main__':
if hasattr(unittest, 'main'):
unittest.main()
else:
# Here's our manual implementation of unittest:
t = Testy()
for m in dir(t.__class__):
if m[:len("test_")] == "test_":
print m, "... ",
getattr(t, m)()
print
pass
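# Illustrative sketch only, inferred from the assertions in
# test_decode_raw_string above (the real mencode.decode_raw_string may differ):
# strings are encoded as "<length>:<bytes>" and the decoder returns the value
# together with the index just past it.
def _decode_raw_string_sketch(data, index):
    colon = data.index(':', index)
    length_digits = data[index:colon]
    if length_digits != '0' and length_digits.startswith('0'):
        raise ValueError("non-canonical length: %r" % length_digits)
    length = int(length_digits)
    end = colon + 1 + length
    if end > len(data):
        raise ValueError("string data is truncated")
    return data[colon + 1:end], end

# _decode_raw_string_sketch('1:a', 0) -> ('a', 3)
# _decode_raw_string_sketch('0:', 0)  -> ('', 2)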
|
zooko/egtp
|
common/mencode_unittests.py
|
Python
|
agpl-3.0
| 13,498
|
# -*- coding: utf-8 -*-
import simplejson
from django.core.management.base import BaseCommand
from django.contrib.gis.geos import MultiPolygon, Polygon
from ...models import State
class Command(BaseCommand):
args = 'filename'
help = 'Import states from a GeoJSON file'
def handle(self, *args, **options):
for filename in args:
data_json = open(filename, 'r').read()
data = simplejson.loads(data_json)
for feature in data['features']:
state = State(
name=feature['properties'].get('name'),
code=feature['properties'].get('code'),
)
if feature['geometry'].get('type') == 'MultiPolygon':
state.geom = MultiPolygon(
[Polygon(poly) for poly in feature['geometry'].get('coordinates')[0]]
)
else:
state.geom = MultiPolygon(Polygon(feature['geometry'].get('coordinates')[0]))
state.save()
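# Usage (illustrative; the file name is an assumption):
#   python manage.py importstates states.geojson
# Each feature is expected to carry "name" and "code" properties and either a
# Polygon or MultiPolygon geometry.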
|
ibamacsr/routes_registry_api
|
routes_registry_api/routes/management/commands/importstates.py
|
Python
|
agpl-3.0
| 1,055
|
# -*- coding: utf-8 -*-
# © 2016 Comunitea - Kiko Sanchez <kiko@comunitea.com>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.
from odoo import api, fields, models, _
import odoo.addons.decimal_precision as dp
class ShippingContainerType(models.Model):
_name = "shipping.container.type"
name = fields.Char("Container type", required=True)
volume = fields.Float("Volumen", help="Container volume (m3)", required=True)
length = fields.Float("Length", help="Length(m)")
height = fields.Float("Height", help="Height(m)")
width = fields.Float("Width", help="Width(m)")
@api.onchange('length', 'height', 'width')
def onchange_dimensions(self):
if self.length and self.height and self.width:
self.volume = self.length * self.height * self.width
class ShippingContainer(models.Model):
_name = "shipping.container"
@api.one
def _get_moves(self):
self.move_ids_count = len(self.move_ids)
@api.one
def _get_partners(self):
self.partner_ids = self.picking_ids.partner_id
@api.multi
def _available_volume(self):
for container in self:
volume = container.shipping_container_type_id.volume
weight = 0.00
for move in container.move_ids:
volume -= move.product_id.volume * move.product_uom_qty
weight += move.product_id.weight * move.product_uom_qty
container.available_volume = volume
container.weight = weight
name = fields.Char("Container Ref.", required=True)
date_expected = fields.Date("Date expected", required=True)
date_shipment = fields.Date("Shipment date")
picking_ids = fields.One2many("stock.picking", "shipping_container_id", "Pickings")
company_id = fields. \
Many2one("res.company", "Company", required=True,
default=lambda self:
self.env['res.company']._company_default_get('shipping.container'))
harbor_id = fields.Many2one('res.harbor', string="Harbor", required=True)
move_ids = fields.One2many('stock.move', 'shipping_container_id', string="Moves")
move_ids_count = fields.Integer('Move ids count', compute="_get_moves")
harbor_dest_id = fields.Many2one('res.harbor', string="Dest. harbor")
state = fields.Selection([('loading', 'Loading'),
('transit', 'Transit'),
('destination', 'Destination')],
default='loading')
shipping_container_type_id = fields.Many2one('shipping.container.type', 'Type')
available_volume = fields.Float("Available volume (m3)", compute="_available_volume")
weight = fields.Float("Weight (kgr.)", compute="_available_volume")
incoterm_id = fields.Many2one('stock.incoterms', string='Incoterm')
_sql_constraints = [
('name_uniq', 'unique(name)', 'Container name must be unique')
]
@api.multi
def action_view_move_ids(self):
action = self.env.ref(
'shipping_container.container_picking_tree_action').read()[0]
action['domain'] = [('id', 'in', self.move_ids.ids)]
return action
def set_transit(self):
self.state = 'transit'
def set_destination(self):
self.state = 'destination'
def set_loading(self):
self.state = 'loading'
@api.multi
def write(self, vals):
if vals.get('date_expected', False):
for container in self:
if vals['date_expected'] != container.date_expected:
for pick in container.picking_ids:
pick.min_date = vals['date_expected']
return super(ShippingContainer, self).write(vals)
|
Comunitea/CMNT_00098_2017_JIM_addons
|
shipping_container/models/shipping_container.py
|
Python
|
agpl-3.0
| 3,735
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from . import base_mapping
from . import advanced_query_mapping
|
emgirardin/compassion-modules
|
message_center_compassion/mappings/__init__.py
|
Python
|
agpl-3.0
| 467
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import family_aux_mass_edit
from . import family_associate_to_family_aux
from . import family_aux_associate_to_address_aux
|
CLVsol/clvsol_odoo_addons
|
clv_family_aux/wizard/__init__.py
|
Python
|
agpl-3.0
| 281
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-10 16:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('base', '0023_messagetemplate'),
]
operations = [
migrations.CreateModel(
name='DocumentFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('content_type', models.CharField(choices=[('APPLICATION_CSV', 'application/csv'), ('APPLICATION_DOC', 'application/doc'), ('APPLICATION_PDF', 'application/pdf'), ('APPLICATION_XLS', 'application/xls'), ('APPLICATION_XLSX', 'application/xlsx'), ('APPLICATION_XML', 'application/xml'), ('APPLICATION_ZIP', 'application/zip'), ('IMAGE_JPEG', 'image/jpeg'), ('IMAGE_GIF', 'image/gif'), ('IMAGE_PNG', 'image/png'), ('TEXT_HTML', 'text/html'), ('TEXT_PLAIN', 'text/plain')], max_length=50)),
('creation_date', models.DateTimeField(auto_now=True)),
('storage_duration', models.IntegerField()),
('full_path', models.CharField(max_length=255)),
('physical_name', models.UUIDField(default=uuid.uuid4, editable=False)),
('physical_extension', models.CharField(max_length=10)),
('description', models.CharField(blank=True, max_length=255, null=True)),
('sub_directory', models.CharField(blank=True, max_length=100, null=True)),
('size', models.IntegerField(blank=True, null=True)),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
uclouvain/osis_louvain
|
base/migrations/0024_documentfile.py
|
Python
|
agpl-3.0
| 1,981
|
"""
Courseware page.
"""
from bok_choy.page_object import PageObject, unguarded
from bok_choy.promise import EmptyPromise
import re
from selenium.webdriver.common.action_chains import ActionChains
from common.test.acceptance.pages.lms.bookmarks import BookmarksPage
from common.test.acceptance.pages.lms.course_page import CoursePage
class CoursewarePage(CoursePage):
"""
    Courseware page.
"""
url_path = "courseware/"
xblock_component_selector = '.vert .xblock'
# TODO: TNL-6546: Remove sidebar selectors
section_selector = '.chapter'
subsection_selector = '.chapter-content-container a'
def __init__(self, browser, course_id):
super(CoursewarePage, self).__init__(browser, course_id)
self.nav = CourseNavPage(browser, self)
def is_browser_on_page(self):
return self.q(css='.course-content').present
@property
def chapter_count_in_navigation(self):
"""
Returns count of chapters available on LHS navigation.
"""
return len(self.q(css='nav.course-navigation a.chapter'))
# TODO: TNL-6546: Remove and find callers.
@property
def num_sections(self):
"""
Return the number of sections in the sidebar on the page
"""
return len(self.q(css=self.section_selector))
# TODO: TNL-6546: Remove and find callers.
@property
def num_subsections(self):
"""
Return the number of subsections in the sidebar on the page, including in collapsed sections
"""
return len(self.q(css=self.subsection_selector))
@property
def xblock_components(self):
"""
Return the xblock components within the unit on the page.
"""
return self.q(css=self.xblock_component_selector)
@property
def num_xblock_components(self):
"""
Return the number of rendered xblocks within the unit on the page
"""
return len(self.xblock_components)
def xblock_component_type(self, index=0):
"""
Extract rendered xblock component type.
        Arguments:
            index: which xblock to query, where the index is the vertical display within the page
                (default is 0)
        Returns:
            str: xblock module type
"""
return self.q(css=self.xblock_component_selector).attrs('data-block-type')[index]
def xblock_component_html_content(self, index=0):
"""
Extract rendered xblock component html content.
        Arguments:
            index: which xblock to query, where the index is the vertical display within the page
                (default is 0)
        Returns:
            str: xblock module html content
"""
# When Student Notes feature is enabled, it looks for the content inside
# `.edx-notes-wrapper-content` element (Otherwise, you will get an
# additional html related to Student Notes).
element = self.q(css='{} .edx-notes-wrapper-content'.format(self.xblock_component_selector))
if element.first:
return element.attrs('innerHTML')[index].strip()
else:
return self.q(css=self.xblock_component_selector).attrs('innerHTML')[index].strip()
def verify_tooltips_displayed(self):
"""
Verify that all sequence navigation bar tooltips are being displayed upon mouse hover.
If a tooltip does not appear, raise a BrokenPromise.
"""
for index, tab in enumerate(self.q(css='#sequence-list > li')):
ActionChains(self.browser).move_to_element(tab).perform()
self.wait_for_element_visibility(
'#tab_{index} > .sequence-tooltip'.format(index=index),
'Tab {index} should appear'.format(index=index)
)
@property
def course_license(self):
"""
Returns the course license text, if present. Else returns None.
"""
element = self.q(css="#content .container-footer .course-license")
if element.is_present():
return element.text[0]
return None
def go_to_sequential_position(self, sequential_position):
"""
Within a section/subsection navigate to the sequential position specified by `sequential_position`.
Arguments:
sequential_position (int): position in sequential bar
"""
def is_at_new_position():
"""
Returns whether the specified tab has become active. It is defensive
against the case where the page is still being loaded.
"""
active_tab = self._active_sequence_tab
try:
return active_tab and int(active_tab.attrs('data-element')[0]) == sequential_position
except IndexError:
return False
sequential_position_css = '#sequence-list #tab_{0}'.format(sequential_position - 1)
self.q(css=sequential_position_css).first.click()
EmptyPromise(is_at_new_position, "Position navigation fulfilled").fulfill()
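    # Typical call (illustrative): courseware_page.go_to_sequential_position(2)
    # clicks the second tab in the sequence bar and blocks until that nav item
    # reports itself as the active one.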
@property
def sequential_position(self):
"""
Returns the position of the active tab in the sequence.
"""
tab_id = self._active_sequence_tab.attrs('id')[0]
return int(tab_id.split('_')[1])
@property
def _active_sequence_tab(self): # pylint: disable=missing-docstring
return self.q(css='#sequence-list .nav-item.active')
@property
def is_next_button_enabled(self): # pylint: disable=missing-docstring
return not self.q(css='.sequence-nav > .sequence-nav-button.button-next.disabled').is_present()
@property
def is_previous_button_enabled(self): # pylint: disable=missing-docstring
return not self.q(css='.sequence-nav > .sequence-nav-button.button-previous.disabled').is_present()
def click_next_button_on_top(self): # pylint: disable=missing-docstring
self._click_navigation_button('sequence-nav', 'button-next')
def click_next_button_on_bottom(self): # pylint: disable=missing-docstring
self._click_navigation_button('sequence-bottom', 'button-next')
def click_previous_button_on_top(self): # pylint: disable=missing-docstring
self._click_navigation_button('sequence-nav', 'button-previous')
def click_previous_button_on_bottom(self): # pylint: disable=missing-docstring
self._click_navigation_button('sequence-bottom', 'button-previous')
def _click_navigation_button(self, top_or_bottom_class, next_or_previous_class):
"""
Clicks the navigation button, given the respective CSS classes.
"""
previous_tab_id = self._active_sequence_tab.attrs('data-id')[0]
def is_at_new_tab_id():
"""
Returns whether the active tab has changed. It is defensive
against the case where the page is still being loaded.
"""
active_tab = self._active_sequence_tab
try:
return active_tab and previous_tab_id != active_tab.attrs('data-id')[0]
except IndexError:
return False
self.q(
css='.{} > .sequence-nav-button.{}'.format(top_or_bottom_class, next_or_previous_class)
).first.click()
EmptyPromise(is_at_new_tab_id, "Button navigation fulfilled").fulfill()
@property
def can_start_proctored_exam(self):
"""
Returns True if the timed/proctored exam timer bar is visible on the courseware.
"""
return self.q(css='button.start-timed-exam[data-start-immediately="false"]').is_present()
def start_timed_exam(self):
"""
clicks the start this timed exam link
"""
self.q(css=".xblock-student_view .timed-exam .start-timed-exam").first.click()
self.wait_for_element_presence(".proctored_exam_status .exam-timer", "Timer bar")
def stop_timed_exam(self):
"""
clicks the stop this timed exam link
"""
self.q(css=".proctored_exam_status button.exam-button-turn-in-exam").first.click()
self.wait_for_element_absence(".proctored_exam_status .exam-button-turn-in-exam", "End Exam Button gone")
self.wait_for_element_presence("button[name='submit-proctored-exam']", "Submit Exam Button")
self.q(css="button[name='submit-proctored-exam']").first.click()
self.wait_for_element_absence(".proctored_exam_status .exam-timer", "Timer bar")
def start_proctored_exam(self):
"""
clicks the start this timed exam link
"""
self.q(css='button.start-timed-exam[data-start-immediately="false"]').first.click()
# Wait for the unique exam code to appear.
# self.wait_for_element_presence(".proctored-exam-code", "unique exam code")
def has_submitted_exam_message(self):
"""
Returns whether the "you have submitted your exam" message is present.
This being true implies "the exam contents and results are hidden".
"""
return self.q(css="div.proctored-exam.completed").visible
def content_hidden_past_due_date(self):
"""
Returns whether the "the due date for this ___ has passed" message is present.
___ is the type of the hidden content, and defaults to subsection.
This being true implies "the ___ contents are hidden because their due date has passed".
"""
message = "this assignment is no longer available"
if self.q(css="div.seq_content").is_present():
return False
for html in self.q(css="div.hidden-content").html:
if message in html:
return True
return False
@property
def entrance_exam_message_selector(self):
"""
Return the entrance exam status message selector on the top of courseware page.
"""
return self.q(css='#content .container section.course-content .sequential-status-message')
def has_entrance_exam_message(self):
"""
        Returns boolean indicating presence of the entrance exam status message container div.
"""
return self.entrance_exam_message_selector.is_present()
def has_passed_message(self):
"""
Returns boolean indicating presence of passed message.
"""
return self.entrance_exam_message_selector.is_present() \
and "You have passed the entrance exam" in self.entrance_exam_message_selector.text[0]
def has_banner(self):
"""
Returns boolean indicating presence of banner
"""
return self.q(css='.pattern-library-shim').is_present()
@property
def is_timer_bar_present(self):
"""
Returns True if the timed/proctored exam timer bar is visible on the courseware.
"""
return self.q(css=".proctored_exam_status .exam-timer").is_present()
def active_usage_id(self):
""" Returns the usage id of active sequence item """
get_active = lambda el: 'active' in el.get_attribute('class')
attribute_value = lambda el: el.get_attribute('data-id')
return self.q(css='#sequence-list .nav-item').filter(get_active).map(attribute_value).results[0]
@property
def breadcrumb(self):
""" Return the course tree breadcrumb shown above the sequential bar """
return [part.strip() for part in self.q(css='.path .position').text[0].split('>')]
def unit_title_visible(self):
""" Check if unit title is visible """
return self.q(css='.unit-title').visible
def bookmark_button_visible(self):
""" Check if bookmark button is visible """
EmptyPromise(lambda: self.q(css='.bookmark-button').visible, "Bookmark button visible").fulfill()
return True
@property
def bookmark_button_state(self):
""" Return `bookmarked` if button is in bookmarked state else '' """
return 'bookmarked' if self.q(css='.bookmark-button.bookmarked').present else ''
@property
def bookmark_icon_visible(self):
""" Check if bookmark icon is visible on active sequence nav item """
return self.q(css='.active .bookmark-icon').visible
def click_bookmark_unit_button(self):
""" Bookmark a unit by clicking on Bookmark button """
previous_state = self.bookmark_button_state
self.q(css='.bookmark-button').first.click()
EmptyPromise(lambda: self.bookmark_button_state != previous_state, "Bookmark button toggled").fulfill()
# TODO: TNL-6546: Remove this helper function
def click_bookmarks_button(self):
""" Click on Bookmarks button """
self.q(css='.bookmarks-list-button').first.click()
bookmarks_page = BookmarksPage(self.browser, self.course_id)
bookmarks_page.visit()
class CoursewareSequentialTabPage(CoursePage):
"""
Courseware Sequential page
"""
def __init__(self, browser, course_id, chapter, subsection, position):
super(CoursewareSequentialTabPage, self).__init__(browser, course_id)
self.url_path = "courseware/{}/{}/{}".format(chapter, subsection, position)
def is_browser_on_page(self):
return self.q(css='nav.sequence-list-wrapper').present
def get_selected_tab_content(self):
"""
return the body of the sequential currently selected
"""
return self.q(css='#seq_content .xblock').text[0]
class CourseNavPage(PageObject):
"""
Handles navigation on the courseware pages, including sequence navigation and
breadcrumbs.
"""
url = None
def __init__(self, browser, parent_page):
super(CourseNavPage, self).__init__(browser)
self.parent_page = parent_page
# TODO: TNL-6546: Remove the following
self.unified_course_view = False
def is_browser_on_page(self):
return self.parent_page.is_browser_on_page
# TODO: TNL-6546: Remove method, outline no longer on courseware page
@property
def sections(self):
"""
Return a dictionary representation of sections and subsections.
Example:
{
'Introduction': ['Course Overview'],
'Week 1': ['Lesson 1', 'Lesson 2', 'Homework']
'Final Exam': ['Final Exam']
}
You can use these titles in `go_to_section` to navigate to the section.
"""
# Dict to store the result
nav_dict = dict()
section_titles = self._section_titles()
# Get the section titles for each chapter
for sec_index, sec_title in enumerate(section_titles):
            # Add one to convert list index (starts at 0) to CSS index (starts at 1)
            subsection_titles = self._subsection_titles(sec_index + 1)
            if len(subsection_titles) < 1:
                self.warning("Could not find subsections for '{0}'".format(sec_title))
            else:
                nav_dict[sec_title] = subsection_titles
return nav_dict
@property
def sequence_items(self):
"""
Return a list of sequence items on the page.
Sequence items are one level below subsections in the course nav.
Example return value:
['Chemical Bonds Video', 'Practice Problems', 'Homework']
"""
seq_css = 'ol#sequence-list>li>.nav-item>.sequence-tooltip'
return self.q(css=seq_css).map(self._clean_seq_titles).results
# TODO: TNL-6546: Remove method, outline no longer on courseware page
def go_to_section(self, section_title, subsection_title):
"""
Go to the section in the courseware.
Every section must have at least one subsection, so specify
both the section and subsection title.
Example:
go_to_section("Week 1", "Lesson 1")
"""
# For test stability, disable JQuery animations (opening / closing menus)
self.browser.execute_script("jQuery.fx.off = true;")
# Get the section by index
try:
sec_index = self._section_titles().index(section_title)
except ValueError:
self.warning("Could not find section '{0}'".format(section_title))
return
# Click the section to ensure it's open (no harm in clicking twice if it's already open)
# Add one to convert from list index to CSS index
section_css = '.course-navigation .chapter:nth-of-type({0})'.format(sec_index + 1)
self.q(css=section_css).first.click()
# Get the subsection by index
try:
subsec_index = self._subsection_titles(sec_index + 1).index(subsection_title)
except ValueError:
msg = "Could not find subsection '{0}' in section '{1}'".format(subsection_title, section_title)
self.warning(msg)
return
# Convert list indices (start at zero) to CSS indices (start at 1)
subsection_css = (
".course-navigation .chapter-content-container:nth-of-type({0}) "
".menu-item:nth-of-type({1})"
).format(sec_index + 1, subsec_index + 1)
# Click the subsection and ensure that the page finishes reloading
self.q(css=subsection_css).first.click()
self._on_section_promise(section_title, subsection_title).fulfill()
def go_to_vertical(self, vertical_title):
"""
Within a section/subsection, navigate to the vertical with `vertical_title`.
"""
# Get the index of the item in the sequence
all_items = self.sequence_items
try:
seq_index = all_items.index(vertical_title)
except ValueError:
msg = "Could not find sequential '{0}'. Available sequentials: [{1}]".format(
vertical_title, ", ".join(all_items)
)
self.warning(msg)
else:
# Click on the sequence item at the correct index
# Convert the list index (starts at 0) to a CSS index (starts at 1)
seq_css = "ol#sequence-list>li:nth-of-type({0})>.nav-item".format(seq_index + 1)
self.q(css=seq_css).first.click()
# Click triggers an ajax event
self.wait_for_ajax()
# TODO: TNL-6546: Remove method, outline no longer on courseware page
def _section_titles(self):
"""
Return a list of all section titles on the page.
"""
chapter_css = '.course-navigation .chapter .group-heading'
return self.q(css=chapter_css).map(lambda el: el.text.strip()).results
# TODO: TNL-6546: Remove method, outline no longer on courseware page
def _subsection_titles(self, section_index):
"""
Return a list of all subsection titles on the page
for the section at index `section_index` (starts at 1).
"""
# Retrieve the subsection title for the section
# Add one to the list index to get the CSS index, which starts at one
subsection_css = (
".course-navigation .chapter-content-container:nth-of-type({0}) "
".menu-item a p:nth-of-type(1)"
).format(section_index)
# If the element is visible, we can get its text directly
# Otherwise, we need to get the HTML
# It *would* make sense to always get the HTML, but unfortunately
# the open tab has some child <span> tags that we don't want.
return self.q(
css=subsection_css
).map(
lambda el: el.text.strip().split('\n')[0] if el.is_displayed() else el.get_attribute('innerHTML').strip()
).results
# TODO: TNL-6546: Remove method, outline no longer on courseware page
def _on_section_promise(self, section_title, subsection_title):
"""
Return a `Promise` that is fulfilled when the user is on
the correct section and subsection.
"""
desc = "currently at section '{0}' and subsection '{1}'".format(section_title, subsection_title)
return EmptyPromise(
lambda: self.is_on_section(section_title, subsection_title), desc
)
def go_to_outline(self):
"""
Navigates using breadcrumb to the course outline on the course home page.
Returns CourseHomePage page object.
"""
# To avoid circular dependency, importing inside the function
from common.test.acceptance.pages.lms.course_home import CourseHomePage
course_home_page = CourseHomePage(self.browser, self.parent_page.course_id)
self.q(css='.path a').click()
course_home_page.wait_for_page()
return course_home_page
@unguarded
def is_on_section(self, section_title, subsection_title):
"""
Return a boolean indicating whether the user is on the section and subsection
with the specified titles.
"""
# TODO: TNL-6546: Remove if/else; always use unified_course_view version (if)
if self.unified_course_view:
# breadcrumb location of form: "SECTION_TITLE > SUBSECTION_TITLE > SEQUENTIAL_TITLE"
bread_crumb_current = self.q(css='.position').text
if len(bread_crumb_current) != 1:
self.warning("Could not find the current bread crumb with section and subsection.")
return False
return bread_crumb_current[0].strip().startswith(section_title + ' > ' + subsection_title + ' > ')
else:
# This assumes that the currently expanded section is the one we're on
# That's true right after we click the section/subsection, but not true in general
# (the user could go to a section, then expand another tab).
current_section_list = self.q(css='.course-navigation .chapter.is-open .group-heading').text
current_subsection_list = self.q(css='.course-navigation .chapter-content-container .menu-item.active a p').text
if len(current_section_list) == 0:
self.warning("Could not find the current section")
return False
elif len(current_subsection_list) == 0:
self.warning("Could not find current subsection")
return False
else:
return (
current_section_list[0].strip() == section_title and
current_subsection_list[0].strip().split('\n')[0] == subsection_title
)
# Regular expression to remove HTML span tags from a string
REMOVE_SPAN_TAG_RE = re.compile(r'</span>(.+)<span')
def _clean_seq_titles(self, element):
"""
Clean HTML of sequence titles, stripping out span tags and returning the first line.
"""
return self.REMOVE_SPAN_TAG_RE.search(element.get_attribute('innerHTML')).groups()[0].strip()
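    # For example, a made-up innerHTML such as
    #   '<span class="icon"></span>Homework<span class="sr">, current</span>'
    # yields 'Homework' after the regex search and strip above.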
# TODO: TNL-6546: Remove. This is no longer needed.
@property
def active_subsection_url(self):
"""
return the url of the active subsection in the left nav
"""
return self.q(css='.chapter-content-container .menu-item.active a').attrs('href')[0]
# TODO: TNL-6546: Remove all references to self.unified_course_view
# TODO: TNL-6546: Remove the following function
def visit_unified_course_view(self):
# use unified_course_view version of the nav
self.unified_course_view = True
# reload the same page with the unified course view
self.browser.get(self.browser.current_url + "&unified_course_view=1")
self.wait_for_page()
|
romain-li/edx-platform
|
common/test/acceptance/pages/lms/courseware.py
|
Python
|
agpl-3.0
| 23,445
|
"""
Factories for Program Enrollment tests.
"""
from __future__ import absolute_import
from uuid import uuid4
import factory
from factory.django import DjangoModelFactory
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.program_enrollments import models
from student.tests.factories import CourseEnrollmentFactory, UserFactory
class ProgramEnrollmentFactory(DjangoModelFactory):
""" A Factory for the ProgramEnrollment model. """
class Meta(object):
model = models.ProgramEnrollment
user = factory.SubFactory(UserFactory)
external_user_key = None
program_uuid = factory.LazyFunction(uuid4)
curriculum_uuid = factory.LazyFunction(uuid4)
status = 'enrolled'
PROGRAM_COURSE_ENROLLMENT_DEFAULT_COURSE_KEY = (
CourseKey.from_string("course-v1:edX+DemoX+Demo_Course")
)
class ProgramCourseEnrollmentFactory(DjangoModelFactory):
""" A factory for the ProgramCourseEnrollment model. """
class Meta(object):
model = models.ProgramCourseEnrollment
program_enrollment = factory.SubFactory(ProgramEnrollmentFactory)
course_enrollment = factory.SubFactory(CourseEnrollmentFactory)
course_key = factory.LazyAttribute(
lambda pce: (
pce.course_enrollment.course_id
if pce.course_enrollment
else PROGRAM_COURSE_ENROLLMENT_DEFAULT_COURSE_KEY
)
)
status = 'active'
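# Sketch of typical usage from a Django test case (illustrative values only):
#   enrollment = ProgramEnrollmentFactory(external_user_key='student-1')
#   pce = ProgramCourseEnrollmentFactory(program_enrollment=enrollment)
# course_key then defaults to the linked course enrollment's course_id, or to
# PROGRAM_COURSE_ENROLLMENT_DEFAULT_COURSE_KEY when course_enrollment is None.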
|
ESOedX/edx-platform
|
lms/djangoapps/program_enrollments/tests/factories.py
|
Python
|
agpl-3.0
| 1,400
|
"""
Database ORM models managed by this Django app
Please do not integrate directly with these models!!! This app currently
offers one programmatic API -- api.py for direct Python integration.
"""
import re
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import gettext_lazy as _
from model_utils.models import TimeStampedModel
from simple_history.models import HistoricalRecords
class Organization(TimeStampedModel):
"""
An Organization is a representation of an entity which publishes/provides
one or more courses delivered by the LMS. Organizations have a base set of
metadata describing the organization, including id, name, and description.
"""
name = models.CharField(max_length=255, db_index=True)
short_name = models.CharField(
max_length=255,
unique=True,
verbose_name='Short Name',
help_text=_(
'Unique, short string identifier for organization. '
'Please do not use spaces or special characters. '
'Only allowed special characters are period (.), hyphen (-) and underscore (_).'
),
)
description = models.TextField(null=True, blank=True)
logo = models.ImageField(
upload_to='organization_logos',
help_text=_('Please add only .PNG files for logo images. This logo will be used on certificates.'),
null=True, blank=True, max_length=255
)
active = models.BooleanField(default=True)
history = HistoricalRecords()
def __str__(self):
return f"{self.name} ({self.short_name})"
def clean(self):
if not re.match("^[a-zA-Z0-9._-]*$", self.short_name):
raise ValidationError(_('Please do not use spaces or special characters in the short name '
'field. Only allowed special characters are period (.), hyphen (-) '
'and underscore (_).'))
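    # For instance, short names such as 'edX' or 'my_org-1.0' pass this check,
    # while 'my org' or 'org#1' would raise the ValidationError (examples are
    # illustrative only).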
class OrganizationCourse(TimeStampedModel):
"""
An OrganizationCourse represents the link between an Organization and a
Course (via course key). Because Courses are not true Open edX entities
(in the Django/ORM sense) the modeling and integrity is limited to that
of specifying course identifier strings in this model.
"""
course_id = models.CharField(max_length=255, db_index=True, verbose_name='Course ID')
organization = models.ForeignKey(Organization, db_index=True, on_delete=models.CASCADE)
active = models.BooleanField(default=True)
history = HistoricalRecords()
class Meta:
""" Meta class for this Django model """
unique_together = (('course_id', 'organization'),)
verbose_name = _('Link Course')
verbose_name_plural = _('Link Courses')
|
edx/edx-organizations
|
organizations/models.py
|
Python
|
agpl-3.0
| 2,793
|
import os
import os.path
import sys
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Add apps and lib directories to PYTHONPATH
sys.path = [
ROOT,
os.path.join(ROOT, 'apps'),
] + sys.path
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "koedquiz.settings")
# This application object is used by the development server
# as well as any WSGI server configured to use this file.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
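# A WSGI server is then pointed at this module-level object, for instance
# (illustrative command, not from the project docs):
#   gunicorn koedquiz.wsgi:application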
|
fnp/koed-quiz
|
koedquiz/wsgi.py
|
Python
|
agpl-3.0
| 495
|