| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
from test_support import verbose, TestFailed
if verbose:
print "Testing whether compiler catches assignment to __debug__"
try:
compile('__debug__ = 1', '?', 'single')
except SyntaxError:
pass
import __builtin__
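# Rebinding __debug__ via setattr (rather than direct assignment) should
# still be allowed at runtime; only the literal assignment is a SyntaxError.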
prev = __builtin__.__debug__
setattr(__builtin__, '__debug__', 'sure')
setattr(__builtin__, '__debug__', prev)
if verbose:
print 'Running tests on argument handling'
try:
exec 'def f(a, a): pass'
raise TestFailed, "duplicate arguments"
except SyntaxError:
pass
try:
exec 'def f(a = 0, a = 1): pass'
raise TestFailed, "duplicate keyword arguments"
except SyntaxError:
pass
try:
exec 'def f(a): global a; a = 1'
raise TestFailed, "variable is global and local"
except SyntaxError:
pass
if verbose:
print "testing complex args"
def comp_args((a, b)):
print a,b
comp_args((1, 2))
def comp_args((a, b)=(3, 4)):
print a, b
comp_args((1, 2))
comp_args()
def comp_args(a, (b, c)):
print a, b, c
comp_args(1, (2, 3))
def comp_args(a=2, (b, c)=(3, 4)):
print a, b, c
comp_args(1, (2, 3))
comp_args()
try:
exec 'def f(a=1, (b, c)): pass'
raise TestFailed, "non-default args after default"
except SyntaxError:
pass
if verbose:
print "testing bad float literals"
def expect_error(s):
try:
eval(s)
raise TestFailed("%r accepted" % s)
except SyntaxError:
pass
expect_error("2e")
expect_error("2.0e+")
expect_error("1e-")
expect_error("3-4e/21")
if verbose:
print "testing literals with leading zeroes"
def expect_same(test_source, expected):
got = eval(test_source)
if got != expected:
raise TestFailed("eval(%r) gave %r, but expected %r" %
(test_source, got, expected))
expect_error("077787")
expect_error("0xj")
expect_error("0x.")
expect_error("0e")
expect_same("0777", 511)
expect_same("0777L", 511)
expect_same("000777", 511)
expect_same("0xff", 255)
expect_same("0xffL", 255)
expect_same("0XfF", 255)
expect_same("0777.", 777)
expect_same("0777.0", 777)
expect_same("000000000000000000000000000000000000000000000000000777e0", 777)
expect_same("0777e1", 7770)
expect_same("0e0", 0)
expect_same("0000E-012", 0)
expect_same("09.5", 9.5)
expect_same("0777j", 777j)
expect_same("00j", 0j)
expect_same("00.0", 0)
expect_same("0e3", 0)
expect_same("090000000000000.", 90000000000000.)
expect_same("090000000000000.0000000000000000000000", 90000000000000.)
expect_same("090000000000000e0", 90000000000000.)
expect_same("090000000000000e-0", 90000000000000.)
expect_same("090000000000000j", 90000000000000j)
expect_error("090000000000000") # plain octal literal w/ decimal digit
expect_error("080000000000000") # plain octal literal w/ decimal digit
expect_error("000000000000009") # plain octal literal w/ decimal digit
expect_error("000000000000008") # plain octal literal w/ decimal digit
expect_same("000000000000007", 7)
expect_same("000000000000008.", 8.)
expect_same("000000000000009.", 9.)
# Verify treatment of unary minus on negative numbers SF bug #660455
expect_same("0xffffffff", -1)
expect_same("-0xffffffff", 1)
| DarioGT/OMS-PluginXML | org.modelsphere.sms/lib/jython-2.2.1/Lib/test/test_compile.py | Python | gpl-3.0 | 3,243 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import unittest2 as unittest
from nupic.frameworks.opf.metrics import getModule, MetricSpec
class OPFMetricsTest(unittest.TestCase):
DELTA = 0.01
VERBOSITY = 0
def testRMSE(self):
rmse = getModule(MetricSpec("rmse", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
rmse.addInstance(gt[i], p[i])
target = 6.71
self.assertTrue(abs(rmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedRMSE(self):
wrmse = getModule(MetricSpec("rmse", None, None,
{"verbosity": OPFMetricsTest.VERBOSITY, "window":3}))
gt = [9, 4, 4, 100, 44]
p = [0, 13, 4, 6, 7]
for gv, pv in zip(gt, p):
wrmse.addInstance(gv, pv)
target = 58.324
self.assertTrue (abs(wrmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testAAE(self):
aae = getModule(MetricSpec("aae", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
aae.addInstance(gt[i], p[i])
target = 6.0
self.assertTrue(abs(aae.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testTrivialAAE(self):
trivialaae = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"aae"}))
gt = [i/4+1 for i in range(100)]
p = [i for i in range(100)]
for i in xrange(len(gt)):
trivialaae.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialaae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testTrivialAccuracy(self):
trivialaccuracy = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"acc"}))
gt = [str(i/4+1) for i in range(100)]
p = [str(i) for i in range(100)]
for i in xrange(len(gt)):
trivialaccuracy.addInstance(gt[i], p[i])
target = .75
self.assertTrue(abs(trivialaccuracy.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testTrivialAverageError(self):
"""Trivial Average Error metric test"""
trivialAveErr = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"avg_err"}))
gt = [str(i/4+1) for i in range(100)]
p = [str(i) for i in range(100)]
for i in xrange(len(gt)):
trivialAveErr.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialAveErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedTrivialAAE(self):
"""Windowed trivial AAE metric test"""
trivialaae = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"aae"}))
gt = [i/4+1 for i in range(1000)]
p = [i for i in range(1000)]
for i in xrange(len(gt)):
trivialaae.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialaae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testWindowedTrivialAccuracy(self):
"""Trivial Accuracy metric test"""
trivialaccuracy = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"acc"}))
gt = [str(i/4+1) for i in range(1000)]
p = [str(i) for i in range(1000)]
for i in xrange(len(gt)):
trivialaccuracy.addInstance(gt[i], p[i])
target = .75
self.assertTrue(abs(trivialaccuracy.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedTrivialAverageError (self):
"""Trivial Average Error metric test"""
trivialAveErr = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"avg_err"}))
gt = [str(i/4+1) for i in range(500, 1000)]
p = [str(i) for i in range(1000)]
for i in xrange(len(gt)):
trivialAveErr.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialAveErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMultistepAAE(self):
"""Multistep AAE metric test"""
msp = getModule(MetricSpec("multiStep", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps": 3}))
# Make each ground truth 1 greater than the prediction
gt = [i+1 for i in range(100)]
p = [{3: {i: .7, 5: 0.3}} for i in range(100)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
target = 1
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepAAEMultipleSteps(self):
"""Multistep AAE metric test, predicting 2 different step sizes"""
msp = getModule(MetricSpec("multiStep", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps": [3,6]}))
# Make each 3 step prediction +1 over ground truth and each 6 step
# prediction +0.5 over ground truth
gt = [i for i in range(100)]
p = [{3: {i+1: .7, 5: 0.3},
6: {i+0.5: .7, 5: 0.3}} for i in range(100)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
target = 0.75 # average of +1 error and 0.5 error
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepProbability(self):
"""Multistep with probabilities metric test"""
msp = getModule(MetricSpec("multiStepProbability", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps":3}))
gt = [5 for i in range(1000)]
p = [{3: {i: .3, 5: .7}} for i in range(1000)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
# Expected AAE over the window (last 100 records, i = 900..999): the wrong
# prediction i carries probability .3, so each record contributes .3*(i-5);
# ((999-5)(1000-5)/2 - (899-5)(900-5)/2)*.3/100 = 283.35
target = 283.35
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepProbabilityMultipleSteps(self):
"""Multistep with probabilities metric test, predicting 2 different step
sizes"""
msp = getModule(MetricSpec("multiStepProbability", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,
"errorMetric":"aae", "steps": [1,3]}))
gt = [5 for i in range(1000)]
p = [{3: {i: .3, 5: .7},
1: {5: 1.0}} for i in range(1000)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
#(((999-5)(1000-5)/2-(899-5)(900-5)/2)*.3/100) / 2
# / 2 because the 1-step prediction is 100% accurate
target = 283.35/2
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMovingMeanAbsoluteError(self):
"""Moving mean Average Absolute Error metric test"""
movingMeanAAE = getModule(MetricSpec("moving_mean", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mean_window":3,
"errorMetric":"aae"}))
gt = [i for i in range(890)]
gt.extend([2*i for i in range(110)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingMeanAAE.addInstance(gt[i], p[i])
res.append(movingMeanAAE.getMetric()["value"])
self.assertTrue(max(res[1:890]) == 2.0)
self.assertTrue(min(res[891:])>=4.0)
target = 4.0
self.assertTrue(abs(movingMeanAAE.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMovingMeanRMSE(self):
"""Moving mean RMSE metric test"""
movingMeanRMSE = getModule(MetricSpec("moving_mean", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mean_window":3,
"errorMetric":"rmse"}))
gt = [i for i in range(890)]
gt.extend([2*i for i in range(110)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingMeanRMSE.addInstance(gt[i], p[i])
res.append(movingMeanRMSE.getMetric()["value"])
self.assertTrue(max(res[1:890]) == 2.0)
self.assertTrue(min(res[891:])>=4.0)
target = 4.0
self.assertTrue(abs(movingMeanRMSE.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testMovingModeAverageError(self):
"""Moving mode Average Error metric test"""
movingModeAvgErr = getModule(MetricSpec("moving_mode", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mode_window":3,
"errorMetric":"avg_err"}))
#Should initially asymptote to .5
#Then after 900 should go to 1.0 as the predictions will always be offset
gt = [i/4 for i in range(900)]
gt.extend([2*i/4 for i in range(100)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingModeAvgErr.addInstance(gt[i], p[i])
res.append(movingModeAvgErr.getMetric()["value"])
#Make sure that there is no point where the average error is >.5
self.assertTrue(max(res[1:890]) == .5)
#Make sure that after the statistics switch the error goes to 1.0
self.assertTrue(min(res[891:])>=.5)
#Make sure that the statistics change is still noticeable while it is
#in the window
self.assertTrue(res[998]<1.0)
target = 1.0
self.assertTrue(abs(movingModeAvgErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMovingModeAccuracy(self):
"""Moving mode Accuracy metric test"""
movingModeACC = getModule(MetricSpec("moving_mode", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mode_window":3,
"errorMetric":"acc"}))
#Should initially asymptote to .5
#Then after 900 should go to 0.0 as the predictions will always be offset
gt = [i/4 for i in range(900)]
gt.extend([2*i/4 for i in range(100)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingModeACC.addInstance(gt[i], p[i])
res.append(movingModeACC.getMetric()["value"])
#Make sure that there is no point where the average acc is <.5
self.assertTrue(min(res[1:899]) == .5)
#Make sure that after the statistics switch the acc goes to 0.0
self.assertTrue(max(res[900:])<=.5)
#Make sure that the statistics change is still noticeable while it
#is in the window
self.assertTrue(res[998]>0.0)
target = 0.0
self.assertTrue(abs(movingModeACC.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testTwoGramScalars(self):
"""Two gram scalars test"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, \
"window":100, "predictionField":"test",
"errorMetric":"acc"}))
# Sequences of 0,1,2,3,4,0,1,2,3,4,...
encodings = [np.zeros(10) for i in range(5)]
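# Build one-hot encodings: encodings[i] gets a 1 at position i.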
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
gt = [i%5 for i in range(1000)]
res = []
for i in xrange(len(gt)):
if i == 20:
# Make sure we don"t barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
# Feed in next groundTruth
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = 1.0
self.assertTrue(abs(oneGram.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testTwoGramScalarsStepsGreaterOne(self):
"""Two gram scalars test with step size other than 1"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,\
"window":100, "predictionField":"test",
"errorMetric":"acc", "steps": 2}))
# Sequences of 0,1,2,3,4,0,1,2,3,4,...
encodings = [np.zeros(10) for i in range(5)]
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
gt = [i%5 for i in range(1000)]
res = []
for i in xrange(len(gt)):
if i == 20:
# Make sure we don"t barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
# Feed in next groundTruth
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = 1.0
self.assertTrue(abs(oneGram.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testTwoGramStrings(self):
"""One gram string test"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"acc",
"predictionField":"test"}))
# Sequences of "0", "1", "2", "3", "4", "0", "1", ...
gt = [str(i%5) for i in range(1000)]
encodings = [np.zeros(10) for i in range(5)]
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
# Make every 5th element random
newElem = 100
for i in range(5, 1000, 5):
gt[i] = str(newElem)
newElem += 20
res = []
for i in xrange(len(gt)):
if i==20:
# Make sure we don"t barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = .8
self.assertTrue(abs(oneGram.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedAAE(self):
"""Windowed AAE"""
waae = getModule(MetricSpec("aae", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":1}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
waae.addInstance(gt[i], p[i])
target = 3.0
self.assertTrue( abs(waae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA, "Got %s" %waae.getMetric())
def testAccuracy(self):
"""Accuracy"""
acc = getModule(MetricSpec("acc", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [0, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
acc.addInstance(gt[i], p[i])
target = 0.5
self.assertTrue(abs(acc.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testWindowedAccuracy(self):
"""Windowed accuracy"""
acc = getModule(MetricSpec("acc", None, None, \
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":2}))
gt = [0, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
acc.addInstance(gt[i], p[i])
target = 0.0
self.assertTrue(abs(acc.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testAverageError(self):
"""Ave Error"""
err = getModule(MetricSpec("avg_err", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [1, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
err.addInstance(gt[i], p[i])
target = (2.0/3.0)
self.assertTrue(abs(err.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testWindowedAverageError(self):
"""Windowed Ave Error"""
err = getModule(MetricSpec("avg_err", None, None, \
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":2}))
gt = [0, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
err.addInstance(gt[i], p[i])
target = 1.0
self.assertTrue(abs(err.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testLongWindowRMSE(self):
"""RMSE"""
rmse = getModule(MetricSpec("rmse", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
rmse.addInstance(gt[i], p[i])
target = 6.71
self.assertTrue(abs(rmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testCustomErrorMetric(self):
customFunc = """def getError(pred,ground,tools):
return abs(pred-ground)"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc, "errorWindow":3}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
aggErr = customEM.addInstance(gt[i], p[i])
target = 5.0
delta = 0.001
# ensure that addInstance returns the aggregate error - other
# uber metrics depend on this behavior.
self.assertEqual(aggErr, customEM.getMetric()["value"])
self.assertTrue(abs(customEM.getMetric()["value"]-target) < delta)
customFunc = """def getError(pred,ground,tools):
sum = 0
for i in range(min(3,tools.getBufferLen())):
sum+=abs(tools.getPrediction(i)-tools.getGroundTruth(i))
return sum/3"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
customEM.addInstance(gt[i], p[i])
target = 5.0
delta = 0.001
self.assertTrue(abs(customEM.getMetric()["value"]-target) < delta)
# Test custom error metric helper functions
# Test getPrediction
# Not-Windowed
storeWindow=4
failed = False
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getPrediction(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == p[i-lookBack])
#Windowed
for lookBack in range(5):
customFunc = """def getError(pred,ground,tools):
return tools.getPrediction(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if lookBack>=storeWindow-1:
pass
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == p[i-lookBack])
#Test getGroundTruth
#Not-Windowed
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getGroundTruth(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == gt[i-lookBack])
#Windowed
for lookBack in range(5):
customFunc = """def getError(pred,ground,tools):
return tools.getGroundTruth(%d)""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == gt[i-lookBack])
#Test getFieldValue
#Not-Windowed Scalar
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
#Windowed Scalar
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue( customEM.getMetric()["value"] == t1[i-lookBack])
#Not-Windowed category
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue( not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
#Windowed category
for lookBack in range(3):
customFunc = """def getError(pred,ground,tools):
return tools.getFieldValue(%d,"test1")""" % lookBack
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
if i < lookBack or lookBack>=storeWindow:
try:
customEM.addInstance(gt[i], p[i], curRecord)
failed = True
except:
self.assertTrue (not failed ,
"An exception should have been generated, but wasn't")
else:
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
#Test getBufferLen
#Not-Windowed
customFunc = """def getError(pred,ground,tools):
return tools.getBufferLen()"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == i+1)
#Windowed
customFunc = """def getError(pred,ground,tools):
return tools.getBufferLen()"""
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":storeWindow}))
gt = [i for i in range(100)]
p = [2*i for i in range(100)]
t1 = [3*i for i in range(100)]
t2 = [str(4*i) for i in range(100)]
for i in xrange(len(gt)):
curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
customEM.addInstance(gt[i], p[i], curRecord)
self.assertTrue (customEM.getMetric()["value"] == min(i+1, 4))
#Test initialization edge cases
try:
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"errorWindow":0}))
self.assertTrue (False , "error Window of 0 should fail self.assertTrue")
except:
pass
try:
customEM = getModule(MetricSpec("custom_error_metric", None, None,
{"customFuncSource":customFunc,"storeWindow":0}))
self.assertTrue (False , "error Window of 0 should fail self.assertTrue")
except:
pass
if __name__ == "__main__":
unittest.main()
| Petr-Kovalev/nupic-win32 | tests/unit/py2/nupic/frameworks/opf/opf_metrics_test.py | Python | gpl-3.0 | 27,219 |
#!/usr/bin/python3
# Copyright 2016-2018 Francisco Pina Martins <f.pinamartins@gmail.com>
# This file is part of pyRona.
# pyRona is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyRona is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyRona. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
from sys import argv
import numpy as np
try:
import md_outlier_remover as mor
import plotters.general_plotter as gp
import file_parser as fp
from argparser import argument_parser
except ImportError:
import pyRona.md_outlier_remover as mor
import pyRona.plotters.general_plotter as gp
import pyRona.file_parser as fp
from pyRona.argparser import argument_parser
class RonaClass:
"""
Stores the RONA values for each covar
"""
POP_NAMES = []
def __init__(self, covar):
self.name = covar
self.pop_ronas = defaultdict(list)
self.corr_coef = {}
self.avg_ronas = []
self.stderr_ronas = []
def basic_stats(self, use_weights):
"""
Gets the average RONA and stdev per population for each associated
covariate. Stores the values in variables inside the class instance.
"""
if len(self.pop_ronas) > 1:
# Sort markers:
markers = sorted(list(self.corr_coef.keys()))
list_of_marker_values = np.array([self.pop_ronas[x] for x in
markers],
dtype=float)
corr_weights = np.array([self.corr_coef[x] for x in markers],
dtype=float)
for i in list_of_marker_values.T:
not_nans = ~np.isnan(i)
if use_weights is True:
if True in not_nans:
self.avg_ronas += [np.average(i[not_nans],
weights=corr_weights
[not_nans])]
else:
self.avg_ronas += [np.nan]
else:
self.avg_ronas += [np.average(i[not_nans])]
self.stderr_ronas += [np.std(i[not_nans])
/ np.sqrt(len(i[not_nans]))]
else:
self.avg_ronas = [x for x in self.pop_ronas.values()][0]
self.stderr_ronas = [0.0] * len(list(self.pop_ronas.values())[0])
def count_markers(self):
"""
Counts the number of markers in the instance.
"""
return len(self.pop_ronas)
def calculate_rona(marker_name, rona, present_covar, future_covar,
allele_freqs, plot, outliers, rtype):
"""
Calculates the "Risk of non adaptation" (RONA) of each popuation for a
given association.
Also plots the associations if requested.
"""
# Remove outliers
if outliers is True:
outlier_pos = mor.md_remove_outliers(present_covar, allele_freqs)
outlier_pos = np.array(outlier_pos, dtype='int')
for i in outlier_pos:
present_covar[i] = np.nan
future_covar[i] = np.nan
allele_freqs[i] = np.nan
rona.pop_names = np.delete(RonaClass.POP_NAMES, outlier_pos)
else:
rona.pop_names = RonaClass.POP_NAMES
# Calculate trendline:
not_nan = ~np.isnan(present_covar)
fit = np.polyfit(present_covar[not_nan], allele_freqs[not_nan], 1)
fit_fn = np.poly1d(fit)
# Get R²:
rona.corr_coef[marker_name] = np.corrcoef(present_covar[not_nan],
allele_freqs[not_nan])[1, 0] ** 2
for pres, fut, freq in zip(present_covar, future_covar, allele_freqs):
pres_distance = freq - fit_fn(pres)
fut_distance = freq - fit_fn(fut)
distance_diff = abs(pres_distance) - abs(fut_distance)
amplitude = max(allele_freqs) - min(allele_freqs)
if rtype == "diff":
rel_distance = distance_diff / amplitude
elif rtype == "absdiff":
rel_distance = abs(distance_diff) / amplitude
elif rtype == "dist":
rel_distance = abs(fut_distance)
rona.pop_ronas[marker_name] += [rel_distance]
if plot is not None:
gp.draw_individual_plots(present_covar, future_covar, rona,
marker_name, allele_freqs, fit_fn, plot)
def results_summary(ronas, use_weights):
"""
This function outputs a summary of the RONAS for each population and
covariate.
"""
pop_names = ronas[0].pop_names
for i, j in enumerate(pop_names):
if i == 0:
print("Covar\t%s" % "\t".join([x.name for x in ronas]))
print("#SNPs\t%s" % "\t".join([str(x.count_markers()) for x in
ronas]))
print("%s\t%s" % (j, "\t".join([str(x.avg_ronas[i]) for x in ronas])))
print("Min R^2\t%s" %
"\t".join([str(np.nanmin(list(x.corr_coef.values()))) for x in
ronas]))
print("Max R^2\t%s" %
"\t".join([str(np.nanmax(list(x.corr_coef.values()))) for x in
ronas]))
# if use_weights is True:
# means = [str(np.average(list(x.corr_coef.values()),
# weights=list(x.corr_coef.values()))) for x in
# ronas]
# else:
means = [str(np.nanmean(list(x.corr_coef.values()))) for x in ronas]
print("Average R^2\t%s" % "\t".join(means))
def ronas_filterer(ronas, use_weights, num_covars):
"""
Filters RONAS to remove immutable covars, and return only the top "n" most
represented covariables.
"""
sortable_representation = {}
for k, rona in ronas.items():
rona.basic_stats(use_weights)
sortable_representation[k] = len(rona.pop_ronas)
top_represented = sorted(sortable_representation,
key=sortable_representation.get,
reverse=True)[:num_covars]
top_ronas = [ronas[x] for x in top_represented]
return top_ronas
def main():
"""
Main function. Takes all the inputs as arguments and runs the remaining
functions of the program.
"""
if len(argv) < 2:
arg_list = ["-h"]
else:
arg_list = argv[1:]
arg = argument_parser(arg_list)
if arg.upstream == "baypass":
present_covariates = fp.parse_baypass_envfile(arg.present_covars_file)
future_covariates = fp.parse_baypass_envfile(arg.future_covars_file)
RonaClass.POP_NAMES = fp.popnames_parser(arg.popnames_file)
assocs = fp.baypass_summary_betai_parser(
arg.baypass_summary_betai_file,
arg.bayes_factor, arg.immutables)
al_freqs = fp.baypass_pij_parser(arg.baypass_pij_file, assocs)
elif arg.upstream == "lfmm":
present_covariates = fp.parse_lfmm_envfile(arg.present_covars_file)
future_covariates = fp.parse_lfmm_envfile(arg.future_covars_file)
assocs = fp.lfmm_results_parser(arg.lfmm_assoc_file,
arg.p_thres,
arg.immutables)
RonaClass.POP_NAMES, al_freqs = fp.lfmm_to_pop_allele_freqs(
arg.allele_freqs_file,
arg.present_covars_file,
assocs,
popnames=True)
ronas = {}
for assoc in assocs:
marker, covar = assoc
# Instantiate the class
if covar not in ronas:
rona = RonaClass(covar)
else:
rona = ronas[covar]
calculate_rona(marker, rona, present_covariates[int(covar) - 1],
future_covariates[int(covar) - 1],
al_freqs[marker],
arg.plots, arg.outliers, arg.rtype)
ronas[covar] = rona
ronas = ronas_filterer(ronas, arg.use_weights, arg.num_covars)
results_summary(ronas, arg.use_weights)
gp.draw_rona_plot(ronas, arg.outfile)
if arg.map_filename is not None:
# The map plotting module is only imported if a map plot is requested.
# This is to be able to keep 'cartopy' as an optional dependency.
try:
import plotters.map_plotter as mapper
except ImportError:
import pyRona.plotters.map_plotter as mapper
mapper.map_plotter(ronas, present_covariates[1], present_covariates[0],
arg.map_filename)
if __name__ == "__main__":
main()
| StuntsPT/pyRona | pyRona/pyRona.py | Python | gpl-3.0 | 8,988 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import shutil
import os
from distutils.core import setup
import switchscreen
shutil.copyfile("switchscreen.py", "switchscreen")
setup(
name = "switchscreen",
version = switchscreen.__version__,
description = "",
author = u"Régis FLORET",
author_email = "regisfloret@gmail.com",
url = "http://code.google.com/p/switchscreen/",
scripts = [
'switchscreen',
]
)
os.remove("switchscreen")
| regisf/switchscreen | setup.py | Python | gpl-3.0 | 475 |
#!/usr/bin/python
from __future__ import unicode_literals
import os
def wait():
raw_input('\nPress Enter to continue...\n\n')
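# Pick the platform-appropriate clear command: 'cls' on Windows ('nt'), 'clear' elsewhere.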
os.system(['clear', 'cls'][os.name == 'nt'])
# Create a class to handle items in a wallet
class BaseWalletHandler(object):
def __init__(self):
self.items = {
'Driver\'s License': False,
'Credit Card': False,
'Cash': False,
'Change': False,
'Insurance Card': False,
'ICE Info': False,
'Pictures': False,
}
def add_item(self, item):
if item in self.items.keys():
self.items[item] = True
def remove_item(self, item):
if item in self.items.keys():
self.items[item] = False
def show_items(self):
for key, value in self.items.items():
if value is True:
print key
# Could this be refactored further to clean it up?
class WalletHandler(BaseWalletHandler):
def __init__(self):
super(WalletHandler, self).__init__()
def add_item(self, item):
super(WalletHandler, self).add_item(item)
if item not in self.items.keys():
self.items[item] = True
def exercise():
wallet_handler = BaseWalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
wallet_handler = WalletHandler()
wallet_handler.add_item('Driver\'s License')
wallet_handler.add_item('ICE Info')
wallet_handler.add_item('Credit Card')
wallet_handler.add_item('Business Card')
wallet_handler.show_items()
wait()
if __name__=='__main__':
exercise()
| ricomoss/learn-tech | python/track_1/lesson4/exercise.py | Python | gpl-3.0 | 1,793 |
# This file is part of Pooky.
# Copyright (C) 2013 Fcrh <coquelicot1117@gmail.com>
#
# Pooky is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pooky is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pooky. If not, see <http://www.gnu.org/licenses/>.
from PyQt4 import QtGui
class SingletonWidget(QtGui.QWidget):
__instance = None
def __init__(self, *args):
super().__init__(*args)
if self.__class__.__instance is not None:
raise RuntimeError("Singleton check failed.")
else:
self.__class__.__instance = self
class Palette(SingletonWidget):
def __init__(self, *args):
super().__init__(*args)
class Preference(SingletonWidget):
def __init__(self, *args):
super().__init__(*args)
QtGui.QLabel('Almost Empty XD.', self)
self.resize(640, 480)
self.setWindowTitle('Preference')
class About(SingletonWidget):
def __init__(self, *args):
super().__init__(*args)
mainlayout = QtGui.QVBoxLayout()
mainlayout.addWidget(self.initContent(), True)
mainlayout.addLayout(self.initButtonLayout(), True)
self.setLayout(mainlayout)
self.setWindowTitle('About Pooky')
self.adjustSize()
def initButtonLayout(self):
btnlayout = QtGui.QHBoxLayout()
licenseBtn = QtGui.QPushButton('License')
def licenseCallBack():
raise RuntimeError("Not implement yet.")
licenseBtn.pressed.connect(licenseCallBack)
btnlayout.addWidget(licenseBtn)
closeBtn = QtGui.QPushButton('Close')
def closeCallBack():
self.lower()
self.hide()
closeBtn.pressed.connect(closeCallBack)
btnlayout.addWidget(closeBtn)
return btnlayout
def initContent(self):
return QtGui.QWidget()
| coquelicot/Pooky | pooky/Widgets.py | Python | gpl-3.0 | 2,289 |
# This Python file uses the following encoding: utf-8
# This file is part of InputShare
#
# Copyright © 2015 Patrick VanDusen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| input-share/input-share | core.py | Python | gpl-3.0 | 783 |
# encoding: utf-8
import sys
import os
import signal
from openpyxl.utils import get_column_letter
from openpyxl import Workbook,load_workbook
ItemList=[]
## {{{ http://code.activestate.com/recipes/410692/ (r8)
# This class provides the functionality we want. You only need to look at
# this if you want to know how this works. It only needs to be defined
# once, no need to muck around with its internals.
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # end the generator; raising StopIteration here is an error under PEP 479
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
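# Illustrative usage sketch (assumed from the recipe's documentation, not part
# of this file):
#
# for case in switch(command):
#     if case('add'):
#         ...        # runs when command == 'add'
#         break
#     if case():     # default: match() with no arguments always matches
#         ...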
class Items:
def __init__(self, BallName,GPIO):
self.BallName = BallName
self.GPIO = GPIO
self.NetName = None
self.Direction = None
self.Data = None
self.Position = None
def set_NetName(self, NetName):
self.NetName=NetName
def set_Direction(self, Direction):
self.Direction=Direction
def set_Data(self, Data):
self.Data=Data
def set_Position(self, Position):
self.Position=Position
def GetCellValue(ws,index_row,column_letter):
return ws[column_letter+str(index_row)].value
def GetColumnNameAndChangeValue(ws,string_row,changelist,index_row):
string=[]
for i in range(0,len(changelist)):
column_letter = changelist[i]
string.append(str(GetCellValue(ws,string_row,column_letter))+' to ['+str(GetCellValue(ws,index_row,column_letter))+'] ')
return "".join(string)
def GetNumAndName(ws,index_row):
return '['+GetCellValue(ws,index_row,'D')+'] '+GetCellValue(ws,index_row,'C')+' : '
def GetColumnLetter(ws,string_row,string_value):
for column in range(1,40):
column_letter = get_column_letter(column)
if ws[column_letter+str(string_row)].value==string_value:
return column_letter
return None
def Get_Bit(byteval,idx):
return (byteval & (1 << idx)) != 0
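# e.g. Get_Bit(0b1010, 1) -> True, Get_Bit(0b1010, 0) -> False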
def AppendBit(data_L,data_M):
output_str=""
if data_L != 0:
for i in range(0, 8):
if Get_Bit(int(data_L,16),i) == True:
output_str=output_str+str(i)+"/"
if data_M != 0:
for i in range(0, 8):
if Get_Bit(int(data_M,16),i) == True:
output_str=output_str+str(i+8)+"/"
if data_L != 0 or data_M != 0:
output_str=output_str+"\n"
return output_str
def StringToSignint(string,len):
x = int(string,16)
if x > ((1<<(8*len))/2)-1:
x -= 1<<(8*len)
return x
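# e.g. StringToSignint("FF", 1) -> -1, StringToSignint("7F", 1) -> 127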
def ExcelToStruct(filename):
try:
wb = load_workbook(filename)
except IOError:
print ("Can't open file exit")
sys.exit(0)
ws = wb.active
index_row=2
print ("clear All data in excel")
tmp_row=index_row
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(tmp_row)].value
if BallName==None:
break
for row in ws['C'+str(tmp_row)+':G'+str(tmp_row)]:
for cell in row:
cell.value = None
tmp_row = tmp_row + 1
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(index_row)].value
if BallName==None:
break;
GPIOPPin=ws[GetColumnLetter(ws,1,'GPIO')+str(index_row)].value
if GPIOPPin!=None:
ItemList.append(Items(BallName,GPIOPPin))
index_row = index_row + 1
wb.save(filename)
def StructToExcel(filename):
try:
wb = load_workbook(filename)
except IOError:
print ("Can't open file exit")
sys.exit(0)
ws = wb.active
index_row=2
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(index_row)].value
if BallName==None:
break
for item in ItemList:
if item.BallName!=None and item.NetName !=None and BallName.strip() == item.BallName.strip():
ws[GetColumnLetter(ws,1,'NetName')+str(index_row)] = item.NetName
index_row = index_row + 1
wb.save(filename)
def FindBallNameAppend(BallName,Position):
for item in ItemList:
if BallName.strip() == item.BallName.strip():
item.set_Position(Position)
def FindPositionAppend(Position,SIG_NAME):
if SIG_NAME.find(r"\g") != -1:
return
for item in ItemList:
if xstr(Position).strip() == xstr(item.Position).strip():
item.set_NetName(SIG_NAME)
def xstr(s):
if s is None:
return ''
return str(s)
def CheckEmptyNetName():
for item in ItemList:
if item.NetName is None:
item.set_NetName("NOT_CONNECT_"+item.GPIO[4:])
def PrintItemList():
for item in ItemList:
print (xstr(item.BallName)+" "+xstr(item.GPIO)+" "+xstr(item.Position)+" "+xstr(item.NetName))
| Always0806/GPIOTableGen | Util.py | Python | gpl-3.0 | 4,608 |
from math import sqrt
def euclidean_distance(p1, p2):
"""
Compute euclidean distance for two points
:param p1:
:param p2:
:return:
"""
dx, dy = p2[0] - p1[0], p2[1] - p1[1]
# Magnitude. Coulomb law.
return sqrt(dx ** 2 + dy ** 2)
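# Example: euclidean_distance((0, 0), (3, 4)) -> 5.0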
| dsaldana/roomba_sensor_network | roomba_sensor/src/roomba_sensor/util/geo.py | Python | gpl-3.0 | 268 |
# -*- coding: utf-8 -*-
"""
# Copyright
Copyright (C) 2012 by Victor
victor@caern.de
# License
This file is part of SoulCreator.
SoulCreator is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
SoulCreator is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
SoulCreator. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
def gender_symbol(gender):
"""
Returns the symbol for the given gender.
"""
if gender.lower() == "female" or gender.lower() == "f" or gender.lower() == "w":
return "♀"
elif gender.lower() == "male" or gender.lower() == "m":
return "♂"
else:
return "⚥"
| GoliathLeviathan/SoulCreator | src/Tools/ImageTools.py | Python | gpl-3.0 | 1,059 |
import copy
import time  # used throughout for timing; made explicit instead of relying on a wildcard import
from nive.utils.dataPool2.mysql.tests import test_MySql
try:
from nive.utils.dataPool2.mysql.mySqlPool import *
except:
pass
from . import test_db
from nive.utils.dataPool2.sqlite.sqlite3Pool import *
mode = "mysql"
printed = [""]
def print_(*kw):
if type(kw)!=type(""):
v = ""
for a in kw:
v += " "+str(a)
else:
v = kw
if v == "":
print(".",)
printed.append("")
else:
printed[-1] += v
def getConnection():
if mode == "mysql":
c = MySqlConn(test_MySql.conn, 0)
print_( "MySQL -")
elif mode == "mysqlinno":
c = test_MySql.conn
c["dbName"] = "ut_dataPool2inno"
c = MySqlConn(c, 0)
print_( "MySQL InnoDB -")
else:
c = Sqlite3Conn(test_db.conn, 0)
print_( "Sqlite 3 -")
return c
def getPool():
if mode == "mysql":
pool = MySql(test_MySql.conf)
pool.SetStdMeta(copy.copy(test_MySql.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_MySql.struct)
pool.CreateConnection(test_MySql.conn)
print_( "MySQL -")
elif mode == "mysqlinno":
pool = MySql(test_MySql.conf)
pool.SetStdMeta(copy.copy(test_MySql.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_MySql.struct)
c = test_MySql.conn
c["dbName"] = "ut_dataPool2inno"
pool.CreateConnection(c)
print_( "MySQL InnoDB -")
else:
pool = Sqlite3(test_db.conf)
pool.SetStdMeta(copy.copy(test_db.stdMeta))
pool.GetPoolStructureObj().SetStructure(test_db.struct)
pool.CreateConnection(test_db.conn)
print_( "Sqlite 3 -")
return pool
def empty():
#if mode == "mysql":
# test_MySql.emptypool()
#elif mode == "mysqlinno":
# test_MySql.emptypool()
#else:
# t_db.emptypool()
pass
def connects(n):
c = getConnection()
print_( "Connection: ")
t = time.time()
for i in range(0,n):
c.connect()
c.Close()
t2 = time.time()
print_( n, " connects in ", t2-t, "secs. ", (t2-t)/n, " per connect")
print_()
def cursors(n):
c = getConnection()
c.connect()
print_( "Cursor: ")
t = time.time()
for i in range(0,n):
cu = c.cursor()
cu.close()
t2 = time.time()
c.Close()
print_( n, " cursors in ", t2-t, "secs. ", (t2-t)/n, " per cursor")
print_()
def createsql(n):
pool = getPool()
print_( "Create SQL: ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"pool_type": "data1", "ftext": "", "fnumber": 3},
sort = "title, id, fnumber",
ascending = 0,
dataTable = "data1",
operators={"pool_type":"=", "ftext": "<>", "fnumber": ">"},
start=1,
max=123)
t2 = time.time()
pool.Close()
print_( n, " sql statements in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery1(n, start):
pool = getPool()
print_( "SQL Query data+meta (join no index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"pool_type": "data1", "ftext": "123", "fnumber": i+start},
sort = "title, id, fnumber",
ascending = 0,
dataTable = "data1",
operators={"pool_type":"=", "ftext": "LIKE", "fnumber": "!="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery2(n, start):
pool = getPool()
print_( "SQL Query data+meta=id (join index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta)+list(test_MySql.struct["data1"]),
{"id": i+start},
sort = "title",
ascending = 0,
dataTable = "data1",
operators={},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery3(n, start):
pool = getPool()
print_( "SQL Query meta=id (index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i},
sort = "id",
ascending = 0,
dataTable = "pool_meta",
singleTable = 1,
operators={},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery4(n, start):
pool = getPool()
print_( "SQL Query meta=id+pool_type=data1 (index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i, "pool_type": "data1"},
sort = "id",
ascending = 0,
dataTable = "pool_meta",
singleTable = 1,
operators={"pool_type": "="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery5(n, start):
pool = getPool()
print_( "SQL Query meta=id+pool_type=data1+data.funit (join index): ")
t = time.time()
for i in range(0,n):
sql, values = pool.FmtSQLSelect(list(test_MySql.stdMeta),
{"id": start+i, "pool_type": "data1", "funit": 35},
sort = "id",
ascending = 0,
dataTable = "data1",
operators={"pool_type": "="},
start=1,
max=123)
pool.Query(sql, values)
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def sqlquery6(n):
pool = getPool()
print_( "SQL Query filename (text index): ")
t = time.time()
for i in range(0,n):
files = pool.SearchFilename("file1xxx.txt")
t2 = time.time()
pool.Close()
print_( n, " queries in ", t2-t, "secs. ", (t2-t)/n, " per statement")
print_()
def createentries(n):
pool = getPool()
print_( "Create entries (nodb): ")
t = time.time()
for i in range(0,n):
e=pool._GetPoolEntry(i, version=None, datatbl="data1", preload="skip", virtual=True)
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def checkentries(n):
pool = getPool()
print_( "Create entries (nodb) and check exists: ")
t = time.time()
for i in range(0,n):
e=pool._GetPoolEntry(i, version=None, datatbl="data1", preload="skip", virtual=True)
e.Exists()
t2 = time.time()
pool.Close()
print_( n, " checks in ", t2-t, "secs. ", (t2-t)/n, " per check")
print_()
def createentries2(n):
pool = getPool()
print_( "Create entries (nodata): ")
t = time.time()
for i in range(0,n):
e=pool.CreateEntry("data1")
#e.data.update(data1_1)
#e.meta.update(meta1)
e.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def createentries3(n):
pool = getPool()
print_( "Create entries (data+meta): ")
t = time.time()
for i in range(0,n):
e=pool.CreateEntry("data1")
if i==0: id = e.GetID()
e.data.update(test_MySql.data1_1)
e.meta.update(test_MySql.meta1)
e.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
return id
def createentries4(n):
pool = getPool()
print_( "Create entries (data+meta+file): ")
t = time.time()
for i in range(0,n):
e=pool.CreateEntry("data1")
if i==0: id = e.GetID()
e.data.update(test_MySql.data1_1)
e.meta.update(test_MySql.meta1)
e.CommitFile("file1", {"file":test_MySql.file1_1, "filename": "file1.txt"})
e.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
return id
def getentries1(n, start):
pool = getPool()
print_( "Get entries (all): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start)
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getentries2(n, start):
pool = getPool()
print_( "Get entries (all+file): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start, preload="all")
f=e.GetFile("file1")
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getentries5(n, start):
pool = getPool()
print_( "Get entries (all+filestream): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start, preload="all")
#f=e.GetFile("file1")
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getentries4(n, start):
pool = getPool()
print_( "Get entries (meta): ")
t = time.time()
for i in range(0,n):
e=pool.GetEntry(i+start, preload="meta")
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getbatch1(n, start):
pool = getPool()
print_( "Get batch (no preload): ")
t = time.time()
ids = []
for i in range(0,n):
ids.append(i+start)
e=pool.GetBatch(ids, preload="skip")
t2 = time.time()
del e
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getbatch2(n, start):
pool = getPool()
print_( "Get batch (meta): ")
t = time.time()
ids = []
for i in range(0,n):
ids.append(i+start)
e=pool.GetBatch(ids, preload="meta")
t2 = time.time()
del e
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def getbatch3(n, start):
pool = getPool()
print_( "Get batch (all): ")
t = time.time()
ids = []
for i in range(0,n):
ids.append(i+start)
e=pool.GetBatch(ids, preload="all")
t2 = time.time()
del e
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def delentries(n, start):
pool = getPool()
print_( "Delete entries (meta+data): ")
t = time.time()
for i in range(0,n):
pool.DeleteEntry(i+start)
pool.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def delentries2(n, start):
pool = getPool()
print_( "Delete entries (meta+data+file): ")
t = time.time()
for i in range(0,n):
pool.DeleteEntry(i+start)
pool.Commit()
t2 = time.time()
pool.Close()
print_( n, " entries in ", t2-t, "secs. ", (t2-t)/n, " per entry")
print_()
def report(modes, printed):
rep=[]
c = len(printed)/len(modes)
for n in range(0, c):
p = 0
for m in modes:
rep.append(printed[p*c+n])
p+=1
print()
print()
i=0
for p in rep:
print(p)
i+=1
if i==len(modes):
print()
i=0
def run(modes):
global mode , printed
n = 1000
printed = [""]
for m in modes:
mode = m
print()
print(mode,)
empty()
connects(n)
cursors(n)
createsql(n)
createentries(n)
checkentries(n)
createentries2(n)
id = createentries3(n)
id2 = createentries4(n)
getentries1(n, id2)
getentries2(n, id2)
getentries5(n, id2)
getentries4(n, id2)
getbatch1(n, id2)
getbatch2(n, id2)
getbatch3(n, id2)
sqlquery1(n, id2)
sqlquery2(n, id)
sqlquery3(n, id2)
sqlquery4(n, id)
sqlquery5(n, id2)
sqlquery6(n)
delentries(n, id)
delentries2(n, id2)
report(modes, printed)
if __name__ == '__main__':
#run(("sqlite3",))
run(("sqlite3","mysql","mysqlinno"))
| nive/nive | nive/utils/dataPool2/tests/performance_test.py | Python | gpl-3.0 | 12,761 |
#!usr/bin/env python
from pyspace.planet import PlanetArray
from pyspace.simulator import BarnesSimulator
import numpy
x = numpy.array([0,100])
y = numpy.array([0,0])
z = numpy.array([0,0])
m = numpy.array([1000,1])
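# Circular-orbit speed v = sqrt(G*M/r): with G = 1 (code units, assumed),
# M = 1000 and r = 100 this gives v = sqrt(10) for the light body.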
v_y = numpy.array([0,(1000/100)**0.5])
pa = PlanetArray(x, y, z, v_y=v_y, m=m)
sim = BarnesSimulator(pa, 1, 1, 0, sim_name = "two_planets")
sim.simulate(1000, dump_output = True)
| adityapb/pyspace | examples/barnes_two_planets.py | Python | gpl-3.0 | 402 |
import unittest
import upload.injectionContainer as injectionContainer
from upload.strategy.dummys.injectedContainerDummy import ContainerMock
class TestRequestParams(unittest.TestCase):
"""
Class test for request params class
"""
def test_open_file_error(self):
"""
test case secured upload
"""
injectionContainer.Container.update(
ContainerMock().container()
)
from upload.shared \
import open_file
with self.assertRaises(FileNotFoundError):
open_file.execute('FailedTest', 'r')
if __name__ == '__main__':
unittest.main()
| acostasg/upload | upload/tests/shared/test_open_file.py | Python | gpl-3.0 | 642 |
from datetime import timedelta, date
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
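# e.g. daterange(date(2017, 9, 30), date(2017, 10, 2)) yields 2017-09-30 and 2017-10-01 (end date excluded).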
start_date = date(2017, 9, 30)
end_date = date(2017, 10, 23)
for single_date in daterange(start_date, end_date):
print './neg_runner.sh', single_date.strftime("%Y-%m-%d")
print './pos_runner.sh', single_date.strftime("%Y-%m-%d")
| ItsLastDay/academic_university_2016-2018 | subjects/BigData/hw01/fourth_group/gen.py | Python | gpl-3.0 | 409 |
from nose.tools import assert_raises
from endicia.builders.ChangePassPhraseXmlBuilder import ChangePassPhraseXmlBuilder
from endicia.builders.EndiciaXmlBuilder import ValueToLongError
def test_ChangePassPhraseXmlBuilder_invalid_values():
    """ChangePassPhraseXmlBuilder setters should raise ValueToLongError when passed a value longer than allowed"""
    builder = ChangePassPhraseXmlBuilder()
    assert_raises(ValueToLongError, builder.setPartnerID, "123456789012345678901234567890123456789012345678901")
    assert_raises(ValueToLongError, builder.setRequestID, "123456789012345678901234567890123456789012345678901")
    assert_raises(ValueToLongError, builder.setAccountID, "1234567")
    assert_raises(ValueToLongError, builder.setPassPhrase, "12345678901234567890123456789012345678901234567890123456789012345")
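# The guard these setters implement can be sketched roughly as follows (an
# assumption about PyEndicia's behaviour, not its actual code):
def set_checked_sketch(value, max_len):
    if len(value) > max_len:
        raise ValueError("value longer than %d characters" % max_len)  # stands in for ValueToLongError
    return value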
|
streed/PyEndicia
|
endicia/test/builders/test_ChangePassPhraseXmlBuilder.py
|
Python
|
gpl-3.0
| 817
|
import csv
import pickle
import datetime
import os
import urllib.request, urllib.parse, urllib.error
from django.core.cache import cache
from django.urls import reverse
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.db.models import Q
from django.db.models.aggregates import Max
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.utils.html import mark_safe
from django.conf import settings
from coredata.models import Member, CourseOffering, Person, Semester, Role
from courselib.auth import ForbiddenResponse, NotFoundResponse, is_course_student_by_slug
from courselib.auth import is_course_staff_by_slug, requires_course_staff_by_slug
from courselib.search import find_member
from forum.models import Forum
from grades.models import all_activities_filter
from grades.models import Activity, NumericActivity, LetterActivity, CalNumericActivity, GradeHistory
from grades.models import NumericGrade, LetterGrade
from grades.models import CalLetterActivity, ACTIVITY_TYPES, FLAGS
from grades.models import neaten_activity_positions
from grades.forms import NumericActivityForm, LetterActivityForm, CalNumericActivityForm, MessageForm
from grades.forms import ActivityFormEntry, FormulaFormEntry, StudentSearchForm, FORMTYPE
from grades.forms import GROUP_STATUS_MAP, CourseConfigForm, CalLetterActivityForm, CutoffForm
from grades.formulas import EvalException, activities_dictionary, eval_parse
from grades.utils import reorder_course_activities
from grades.utils import ORDER_TYPE, FormulaTesterActivityEntry, FakeActivity, FakeEvalActivity
from grades.utils import generate_numeric_activity_stat,generate_letter_activity_stat
from grades.utils import ValidationError, calculate_numeric_grade, calculate_letter_grade
from marking.models import get_group_mark, StudentActivityMark, GroupActivityMark, ActivityComponent
from groups.models import GroupMember, add_activity_to_group
from quizzes.models import Quiz
from submission.models import SubmissionComponent, GroupSubmission, StudentSubmission, SubmissionInfo, select_all_submitted_components, select_all_components
from log.models import LogEntry
from pages.models import Page, ACL_ROLES
from dashboard.models import UserConfig, NewsItem
from dashboard.photos import pre_fetch_photos, photo_for_view
from discuss import activity as discuss_activity
FROMPAGE = {'course': 'course', 'activityinfo': 'activityinfo', 'activityinfo_group' : 'activityinfo_group'}
# Only for display purposes.
ACTIVITY_TYPE = {'NG': 'Numeric Graded', 'LG': 'Letter Graded',
'CNG': 'Calculated Numeric Graded', 'CLG': 'Calculated Letter Graded'}
@login_required
def course_info(request, course_slug):
if is_course_student_by_slug(request, course_slug):
return _course_info_student(request, course_slug)
elif is_course_staff_by_slug(request, course_slug):
return _course_info_staff(request, course_slug)
else:
return ForbiddenResponse(request)
@requires_course_staff_by_slug
def reorder_activity(request, course_slug):
"""
Ajax way to reorder activity.
This ajax view function is called in the course_info page.
"""
course = get_object_or_404(CourseOffering, slug=course_slug)
if request.method == 'POST':
neaten_activity_positions(course)
# find the activities in question
id_up = request.POST.get('id_up')
id_down = request.POST.get('id_down')
        if id_up is None or id_down is None:
return ForbiddenResponse(request)
# swap the position of the two activities
activity_up = get_object_or_404(Activity, id=id_up, offering__slug=course_slug)
activity_down = get_object_or_404(Activity, id=id_down, offering__slug=course_slug)
activity_up.position, activity_down.position = activity_down.position, activity_up.position
activity_up.save()
activity_down.save()
return HttpResponse("Order updated!")
return ForbiddenResponse(request)
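# A minimal sketch of exercising this view with Django's test client (the URL
# name 'offering:reorder_activity' is an assumption; the project's urls.py is
# not shown here):
#   from django.test import Client
#   url = reverse('offering:reorder_activity', kwargs={'course_slug': course_slug})
#   Client().post(url, {'id_up': 12, 'id_down': 13})  # swaps the two positions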
def _course_info_staff(request, course_slug):
"""
Course front page
"""
course = get_object_or_404(CourseOffering, slug=course_slug)
member = Member.objects.get(offering=course, person__userid=request.user.username, role__in=['INST','TA','APPR'])
activities = all_activities_filter(offering=course)
any_group = True in [a.group for a in activities]
try:
forum = Forum.objects.get(offering=course)
forum_enabled = forum.enabled
except Forum.DoesNotExist:
forum_enabled = False
    # Non-AJAX way to reorder activities; see the reorder_activity view for the AJAX version
order = None
act = None
if 'order' in request.GET:
order = request.GET['order']
if 'act' in request.GET:
act = request.GET['act']
if order and act:
reorder_course_activities(activities, act, order)
return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': course_slug}))
# Todo: is the activity type necessary?
activities_info = []
total_percent = 0
for activity in activities:
if activity.percent:
total_percent += activity.percent
if isinstance(activity, NumericActivity):
activities_info.append({'activity':activity, 'type':ACTIVITY_TYPE['NG']})
elif isinstance(activity, LetterActivity):
activities_info.append({'activity':activity, 'type':ACTIVITY_TYPE['LG']})
if len(activities) == 0:
        num_pages = Page.objects.filter(offering=course).count()
        if num_pages == 0:
messages.info(request, "Students won't see this course in their menu on the front page. As soon as some activities or pages have been added, they will see a link to the course info page.")
discussion_activity = False
if course.discussion:
discussion_activity = discuss_activity.recent_activity(member)
# advertise combined offering if applicable.
offer_combined = course.joint_with() and len(activities) == 0
context = {'course': course, 'member': member, 'activities_info': activities_info, 'from_page': FROMPAGE['course'],
'order_type': ORDER_TYPE, 'any_group': any_group, 'total_percent': total_percent, 'discussion_activity': discussion_activity,
'offer_combined': offer_combined, 'forum_enabled': forum_enabled}
return render(request, "grades/course_info_staff.html", context)
@requires_course_staff_by_slug
def course_config(request, course_slug):
from forum.models import Forum
course = get_object_or_404(CourseOffering, slug=course_slug)
try:
forum = Forum.objects.get(offering=course)
except Forum.DoesNotExist:
forum = Forum(offering=course)
forum.enabled = False
if request.method=="POST":
form = CourseConfigForm(request.POST)
if form.is_valid():
course.set_url(form.cleaned_data['url'])
course.set_taemail(form.cleaned_data['taemail'])
#if course.uses_svn():
# course.set_indiv_svn(form.cleaned_data['indiv_svn'])
# course.set_instr_rw_svn(form.cleaned_data['instr_rw_svn'])
course.set_group_min(form.cleaned_data['group_min'])
course.set_group_max(form.cleaned_data['group_max'])
course.save()
forum.enabled = form.cleaned_data['forum']
forum.identity = form.cleaned_data['forum_identity']
forum.save()
messages.success(request, 'Course config updated')
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("updated config for %s") % (course),
related_object=course)
l.save()
return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': course_slug}))
else:
form = CourseConfigForm({'url': course.url(), 'taemail': course.taemail(), 'forum': forum.enabled, 'forum_identity': forum.identity,
'indiv_svn': course.indiv_svn(), 'instr_rw_svn': course.instr_rw_svn(), 'group_min': course.group_min(),'group_max': course.group_max()})
context = {'course': course, 'form': form}
return render(request, "grades/course_config.html", context)
#@requires_course_student_by_slug
def _course_info_student(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activities = all_activities_filter(offering=course)
activities = [a for a in activities if a.status in ['RLS', 'URLS']]
any_group = True in [a.group for a in activities]
has_index = bool(Page.objects.filter(offering=course, label="Index", can_read__in=ACL_ROLES['STUD']))
try:
forum = Forum.objects.get(offering=course)
forum_enabled = forum.enabled
except Forum.DoesNotExist:
forum_enabled = False
activity_data = []
student = Member.objects.get(offering=course, person__userid=request.user.username, role='STUD')
for activity in activities:
data = {}
data['act'] = activity
data['grade_display'] = activity.display_grade_student(student.person)
activity_data.append(data)
    discussion_activity = False
    member = student  # same Member row as the student query above; no need to re-query
if course.discussion:
discussion_activity = discuss_activity.recent_activity(member)
context = {'course': course, 'member': student, 'activity_data': activity_data, 'any_group': any_group,
'has_index': has_index, 'from_page': FROMPAGE['course'], 'discussion_activity': discussion_activity,
'forum_enabled': forum_enabled}
return render(request, "grades/course_info_student.html", context)
@login_required
def activity_info_oldurl(request, course_slug, activity_slug, tail):
"""
Redirect old activity URLs to new (somewhat intelligently: don't redirect if there's no activity there)
"""
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(Activity, slug=activity_slug, offering=course)
act_url = reverse('offering:activity_info', kwargs={'course_slug': course.slug, 'activity_slug': activity.slug})
return HttpResponseRedirect(act_url + tail)
@login_required
def activity_info(request, course_slug, activity_slug):
if is_course_student_by_slug(request, course_slug):
return _activity_info_student(request, course_slug, activity_slug)
elif is_course_staff_by_slug(request, course_slug):
return _activity_info_staff(request, course_slug, activity_slug)
else:
return ForbiddenResponse(request)
def _activity_info_staff(request, course_slug, activity_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activities = all_activities_filter(slug=activity_slug, offering=course)
if len(activities) != 1:
return NotFoundResponse(request)
activity = activities[0]
quiz = Quiz.objects.filter(activity=activity).first()
# build list of all students and grades
students = Member.objects.filter(role="STUD", offering=activity.offering).select_related('person')
if activity.is_numeric():
grades_list = activity.numericgrade_set.filter().select_related('member__person', 'activity')
else:
grades_list = activity.lettergrade_set.filter().select_related('member__person', 'activity')
grades = {}
for g in grades_list:
grades[g.member.person.userid_or_emplid()] = g
source_grades = {}
if activity.is_calculated() and not activity.is_numeric():
# calculated letter needs source grades too
source_list = activity.numeric_activity.numericgrade_set.filter().select_related('member__person', 'activity')
for g in source_list:
source_grades[g.member.person.userid_or_emplid()] = g
# collect group membership info
group_membership = {}
if activity.group:
gms = GroupMember.objects.filter(activity_id=activity.id, confirmed=True).select_related('group', 'student__person', 'group__courseoffering')
for gm in gms:
group_membership[gm.student.person.userid_or_emplid()] = gm.group
# collect submission status
sub_comps = [sc.title for sc in SubmissionComponent.objects.filter(activity_id=activity.id, deleted=False)]
submitted = {}
if activity.group:
subs = GroupSubmission.objects.filter(activity_id=activity.id).select_related('group')
for s in subs:
members = s.group.groupmember_set.filter(activity_id=activity.id)
for m in members:
submitted[m.student.person.userid_or_emplid()] = True
else:
subs = StudentSubmission.objects.filter(activity_id=activity.id)
for s in subs:
submitted[s.member.person.userid_or_emplid()] = True
if bool(sub_comps) and not bool(activity.due_date):
messages.warning(request, 'Students will not be able to submit: no due date/time is set.')
# collect marking status
mark_comps = [ac.title for ac in ActivityComponent.objects.filter(numeric_activity_id=activity.id, deleted=False)]
marked = {}
marks = StudentActivityMark.objects.filter(activity_id=activity.id).select_related('numeric_grade__member__person')
for m in marks:
marked[m.numeric_grade.member.person.userid_or_emplid()] = True
if activity.group:
# also collect group marks: attribute to both the group and members
marks = GroupActivityMark.objects.filter(activity_id=activity.id).select_related('group')
for m in marks:
marked[m.group.slug] = True
members = m.group.groupmember_set.filter(activity_id=activity.id).select_related('student__person')
for m in members:
marked[m.student.person.userid_or_emplid()] = True
context = {'course': course, 'activity': activity, 'students': students, 'grades': grades, 'source_grades': source_grades,
'activity_view_type': 'individual', 'group_membership': group_membership,
'from_page': FROMPAGE['activityinfo'],
'sub_comps': sub_comps, 'mark_comps': mark_comps,
'submitted': submitted, 'marked': marked,
'quiz': quiz,
}
return render(request, 'grades/activity_info.html', context)
def _activity_info_student(request, course_slug, activity_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activities = all_activities_filter(slug=activity_slug, offering=course)
if len(activities) != 1:
return NotFoundResponse(request)
activity = activities[0]
if activity.status=="INVI":
return NotFoundResponse(request)
student = Member.objects.get(offering=course, person__userid=request.user.username, role='STUD')
grade = (activity.GradeClass).objects.filter(activity_id=activity.id, member=student)
if activity.status != "RLS" or not grade:
        # grade shouldn't be displayed, or nothing in database: create a temporary no-grade object for the template
grade = (activity.GradeClass)(activity_id=activity.id, member=student, flag="NOGR")
else:
grade = grade[0]
# only display summary stats for courses with at least STUD_NUM_TO_DISP_ACTSTAT grades received
reason_msg = ''
if activity.is_numeric():
activity_stat, reason_msg = generate_numeric_activity_stat(activity, 'STUD')
else:
activity_stat, reason_msg = generate_letter_activity_stat(activity, 'STUD')
try:
quiz = activity.quiz
completed = quiz.completed(student)
incomplete_quiz = not completed
reviewable_quiz = completed and (quiz.review != 'none') and (activity.status == 'RLS')
except Quiz.DoesNotExist:
incomplete_quiz = False
reviewable_quiz = False
context = {'course': course, 'activity': activity, 'grade': grade,
'activity_stat': activity_stat, 'reason_msg': reason_msg,
'incomplete_quiz': incomplete_quiz,
'reviewable_quiz': reviewable_quiz,
}
resp = render(request, 'grades/activity_info_student.html', context)
resp.allow_gstatic_csp = True
return resp
@requires_course_staff_by_slug
def activity_info_with_groups(request, course_slug, activity_slug):
course = get_object_or_404(CourseOffering, slug = course_slug)
activities = all_activities_filter(slug=activity_slug, offering=course)
if len(activities) != 1:
return NotFoundResponse(request)
activity = activities[0]
if not activity.group:
return NotFoundResponse(request)
# build list of group grades information
all_members = GroupMember.objects.select_related('group', 'student__person', 'group__courseoffering').filter(activity = activity, confirmed = True)
groups_found = {}
grouped_students = 0
for member in all_members:
grouped_students += 1
group = member.group
student = member.student
if group.id not in groups_found:
# a new group discovered by its first member
# get the current grade of the group
current_mark = get_group_mark(activity, group)
value = 'no grade' if current_mark is None else current_mark.mark
new_group_grade_info = {'group': group, 'members': [student], 'grade': value}
groups_found[group.id] = new_group_grade_info
else:
# add this member to its corresponding group info
group_grade_info = groups_found[group.id]
group_grade_info['members'].append(student)
ungrouped_students = Member.objects.filter(offering = course, role = 'STUD').count() - grouped_students
# collect submission status
submitted = {}
subs = GroupSubmission.objects.filter(activity_id=activity.id).select_related('group')
for s in subs:
submitted[s.group.slug] = True
if isinstance(activity, NumericActivity):
activity_type = ACTIVITY_TYPE['NG']
elif isinstance(activity, LetterActivity):
activity_type = ACTIVITY_TYPE['LG']
# more activity info for display
sub_comps = [sc.title for sc in SubmissionComponent.objects.filter(activity_id=activity.id, deleted=False)]
mark_comps = [ac.title for ac in ActivityComponent.objects.filter(numeric_activity_id=activity.id, deleted=False)]
context = {'course': course, 'activity_type': activity_type,
'activity': activity, 'ungrouped_students': ungrouped_students,
'activity_view_type': 'group',
'group_grade_info_list': list(groups_found.values()), 'from_page': FROMPAGE['activityinfo_group'],
'sub_comps': sub_comps, 'mark_comps': mark_comps,
'submitted': submitted}
return render(request, 'grades/activity_info_with_groups.html', context)
@requires_course_staff_by_slug
def activity_stat(request, course_slug, activity_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activities = all_activities_filter(slug=activity_slug, offering=course)
if len(activities) != 1:
return NotFoundResponse(request)
activity = activities[0]
display_summary = True # always display for staff
if activity.is_numeric():
activity_stat, _ = generate_numeric_activity_stat(activity, request.member.role)
GradeClass = NumericGrade
else:
activity_stat, _ = generate_letter_activity_stat(activity, request.member.role)
GradeClass = LetterGrade
# counts submissions (individual & group)
submark_stat = {}
submark_stat['submittable'] = bool(SubmissionComponent.objects.filter(activity_id=activity.id))
submark_stat['studentsubmissons'] = len(set((s.member for s in StudentSubmission.objects.filter(activity_id=activity.id))))
submark_stat['groupsubmissons'] = len(set((s.group for s in GroupSubmission.objects.filter(activity_id=activity.id))))
# build counts of how many times each component has been submitted (by unique members/groups)
sub_comps = select_all_components(activity)
subed_comps = dict(((comp.id, set()) for comp in sub_comps))
    # build dictionaries of submission.id -> owner so we can look up quickly when scanning
subid_dict = dict(((s.id, ("s", s.member_id)) for s in StudentSubmission.objects.filter(activity_id=activity.id)))
subid_dict.update( dict(((s.id, ("g", s.group_id)) for s in GroupSubmission.objects.filter(activity_id=activity.id))) )
# build sets of who has submitted each SubmissionComponent
for sc in select_all_submitted_components(activity_id=activity.id):
if sc.component.deleted:
# don't report on deleted components
continue
owner = subid_dict[sc.submission_id]
        # sanity check: skip records that reference unknown components (corrupt data)
if sc.component_id in subed_comps:
subed_comps[sc.component_id].add(owner)
# actual list of components and counts
sub_comp_rows = []
for comp in sub_comps:
data = {'comp': comp, 'count': len(subed_comps[comp.id])}
sub_comp_rows.append(data)
submark_stat['studentgrades'] = len(set([s.member for s in GradeClass.objects.filter(activity_id=activity.id)]))
if activity.is_numeric():
submark_stat['markable'] = bool(ActivityComponent.objects.filter(numeric_activity_id=activity.id))
submark_stat['studentmarks'] = len(set([s.numeric_grade.member for s in StudentActivityMark.objects.filter(activity_id=activity.id)]))
submark_stat['groupmarks'] = len(set([s.group for s in GroupActivityMark.objects.filter(activity_id=activity.id)]))
else:
submark_stat['markable'] = False
context = {'course': course, 'activity': activity, 'activity_stat': activity_stat, 'display_summary': display_summary, 'submark_stat': submark_stat, 'sub_comp_rows': sub_comp_rows}
resp = render(request, 'grades/activity_stat.html', context)
resp.allow_gstatic_csp = True
return resp
@requires_course_staff_by_slug
def activity_choice(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
context = {'course': course}
return render(request, 'grades/activity_choice.html', context)
@requires_course_staff_by_slug
def edit_cutoffs(request, course_slug, activity_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(CalLetterActivity, slug=activity_slug, offering=course, deleted=False)
if request.method == 'POST':
form = CutoffForm(request.POST)
if form.is_valid(): # All validation rules pass
activity.set_cutoffs(form.cleaned_data['cutoffs'])
activity.save()
if form.cleaned_data['ap'] > activity.numeric_activity.max_grade:
messages.warning(request, "Some grade cutoffs are higher than the maximum grade for %s." % (activity.numeric_activity.name))
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("edited %s cutoffs") % (activity),
related_object=activity)
l.save()
messages.success(request, "Grade cutoffs updated.")
try:
ignored = calculate_letter_grade(course, activity)
if ignored == 1:
messages.warning(request, "Did not calculate letter grade for 1 manually-graded student.")
elif ignored > 1:
messages.warning(request, "Did not calculate letter grade for %i manually-graded students." % (ignored))
except ValidationError as e:
messages.error(request, e.args[0])
except NotImplementedError:
return NotFoundResponse(request)
return HttpResponseRedirect(reverse('offering:activity_info', kwargs={'course_slug': course.slug, 'activity_slug': activity.slug}))
else:
cutoff=activity.get_cutoffs()
cutoffsdict=_cutoffsdict(cutoff)
form=CutoffForm(cutoffsdict)
source_grades = activity.numeric_activity.numericgrade_set.exclude(flag="NOGR")
source_grades = '[' + ", ".join(["%.2f" % (g.value) for g in source_grades]) + ']'
context = {'course': course, 'activity': activity, 'cutoff':form, 'source_grades': source_grades}
resp = render(request, 'grades/edit_cutoffs.html', context)
resp.allow_gstatic_csp = True
return resp
def _cutoffsdict(cutoff):
data = dict()
data['ap'] = cutoff[0]
data['a'] = cutoff[1]
data['am'] = cutoff[2]
data['bp'] = cutoff[3]
data['b'] = cutoff[4]
data['bm'] = cutoff[5]
data['cp'] = cutoff[6]
data['c'] = cutoff[7]
data['cm'] = cutoff[8]
data['d'] = cutoff[9]
return data
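# Example mapping for _cutoffsdict (values hypothetical):
#   _cutoffsdict([95, 90, 85, 80, 75, 70, 65, 60, 55, 50])
#   == {'ap': 95, 'a': 90, 'am': 85, 'bp': 80, 'b': 75,
#       'bm': 70, 'cp': 65, 'c': 60, 'cm': 55, 'd': 50}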
@requires_course_staff_by_slug
def compare_official(request, course_slug, activity_slug):
"""
Screen to compare member.official_grade to this letter activity
"""
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(LetterActivity, slug=activity_slug, offering=course, deleted=False)
members = Member.objects.filter(offering=course, role='STUD')
grades = dict(((g.member, g.letter_grade)for g in LetterGrade.objects.filter(activity_id=activity.id).exclude(flag='NOGR')))
data = []
for m in members:
if m in grades:
g = grades[m]
else:
g = None
        data.append((m, g, m.official_grade != g))
context = {'course': course, 'activity': activity, 'data': data}
return render(request, 'grades/compare_official.html', context)
from dashboard.letters import grade_change_form
@requires_course_staff_by_slug
def grade_change(request, course_slug, activity_slug, userid):
"""
Produce grade change form
"""
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(LetterActivity, slug=activity_slug, offering=course, deleted=False)
member = get_object_or_404(Member, ~Q(role='DROP'), find_member(userid), offering__slug=course_slug)
user = Person.objects.get(userid=request.user.username)
grades = LetterGrade.objects.filter(activity_id=activity.id, member=member).exclude(flag='NOGR')
if grades:
grade = grades[0].letter_grade
else:
grade = None
response = HttpResponse(content_type="application/pdf")
response['Content-Disposition'] = 'inline; filename="%s-gradechange.pdf"' % (userid)
grade_change_form(member, member.official_grade, grade, user, response)
return response
@requires_course_staff_by_slug
def add_numeric_activity(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activities_list = [(None, '\u2014'),]
activities = all_activities_filter(course)
for a in activities:
        if a.group:
activities_list.append((a.slug, a.name))
if request.method == 'POST': # If the form has been submitted...
form = NumericActivityForm(request.POST, previous_activities=activities_list) # A form bound to the POST data
form.activate_addform_validation(course_slug)
if form.is_valid(): # All validation rules pass
try:
aggr_dict = Activity.objects.filter(offering=course).aggregate(Max('position'))
if not aggr_dict['position__max']:
position = 1
else:
position = aggr_dict['position__max'] + 1
config = {
'showstats': form.cleaned_data['showstats'],
'showhisto': form.cleaned_data['showhisto'],
'url': form.cleaned_data['url'],
}
a = NumericActivity.objects.create(name=form.cleaned_data['name'],
short_name=form.cleaned_data['short_name'],
status=form.cleaned_data['status'],
due_date=form.cleaned_data['due_date'],
percent=form.cleaned_data['percent'],
max_grade=form.cleaned_data['max_grade'],
offering=course, position=position,
group=GROUP_STATUS_MAP[form.cleaned_data['group']],
config=config)
            if a.group and form.cleaned_data['extend_group'] is not None:
a2 = [i for i in activities if i.slug == form.cleaned_data['extend_group']]
if len(a2) > 0:
add_activity_to_group(a, a2[0], course)
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("created a numeric activity %s") % (a),
related_object=a)
l.save()
except NotImplementedError:
return NotFoundResponse(request)
messages.success(request, 'New activity "%s" added' % a.name)
_semester_date_warning(request, a)
return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': course_slug}))
else:
messages.error(request, "Please correct the error below")
else:
form = NumericActivityForm(previous_activities=activities_list)
context = {'course': course, 'form': form, 'form_type': FORMTYPE['add']}
return render(request, 'grades/numeric_activity_form.html', context)
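# The position bookkeeping in the view above (and repeated in the other add_*
# views) amounts to this helper (a sketch only, not a refactor the codebase
# actually performs):
def _next_position_sketch(course):
    aggr = Activity.objects.filter(offering=course).aggregate(Max('position'))
    return (aggr['position__max'] or 0) + 1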
@requires_course_staff_by_slug
def add_cal_numeric_activity(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
numeric_activities = NumericActivity.objects.filter(offering=course, deleted=False)
if request.method == 'POST': # If the form has been submitted...
form = CalNumericActivityForm(request.POST) # A form bound to the POST data
form.activate_addform_validation(course_slug)
if form.is_valid(): # All validation rules pass
try:
aggr_dict = Activity.objects.filter(offering=course).aggregate(Max('position'))
if not aggr_dict['position__max']:
position = 1
else:
position = aggr_dict['position__max'] + 1
config = {
'showstats': form.cleaned_data['showstats'],
'showhisto': form.cleaned_data['showhisto'],
'calculation_leak': form.cleaned_data['calculation_leak'],
'url': form.cleaned_data['url'],
}
CalNumericActivity.objects.create(name=form.cleaned_data['name'],
short_name=form.cleaned_data['short_name'],
status=form.cleaned_data['status'],
percent=form.cleaned_data['percent'],
max_grade=form.cleaned_data['max_grade'],
formula=form.cleaned_data['formula'],
offering=course,
position=position,
group=False,
config=config)
except NotImplementedError:
return NotFoundResponse(request)
messages.success(request, 'New activity "%s" added' % form.cleaned_data['name'])
return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': course_slug}))
else:
messages.error(request, "Please correct the error below")
else:
form = CalNumericActivityForm(initial={'formula': '[[activitytotal]]'})
context = {'course': course, 'form': form, 'numeric_activities': numeric_activities, 'form_type': FORMTYPE['add']}
resp = render(request, 'grades/cal_numeric_activity_form.html', context)
resp.has_inline_script = True # insert activity in formula links
return resp
@requires_course_staff_by_slug
def add_cal_letter_activity(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
letter_activities = LetterActivity.objects.filter(offering=course)
numact_choices = [(na.pk, na.name) for na in NumericActivity.objects.filter(offering=course, deleted=False)]
examact_choices = [(0, '\u2014')] + [(na.pk, na.name) for na in Activity.objects.filter(offering=course, deleted=False)]
if request.method == 'POST': # If the form has been submitted...
form = CalLetterActivityForm(request.POST) # A form bound to the POST data
form.fields['numeric_activity'].choices = numact_choices
form.fields['exam_activity'].choices = examact_choices
form.activate_addform_validation(course_slug)
if form.is_valid(): # All validation rules pass
try:
aggr_dict = Activity.objects.filter(offering=course).aggregate(Max('position'))
if not aggr_dict['position__max']:
position = 1
else:
position = aggr_dict['position__max'] + 1
if form.cleaned_data['exam_activity'] == '0':
exam_activity_id = None
else:
exam_activity = Activity.objects.get(pk=form.cleaned_data['exam_activity'])
exam_activity_id = exam_activity.id
config = {
'showstats': form.cleaned_data['showstats'],
'showhisto': form.cleaned_data['showhisto'],
'url': form.cleaned_data['url'],
}
CalLetterActivity.objects.create(name=form.cleaned_data['name'],
short_name=form.cleaned_data['short_name'],
status=form.cleaned_data['status'],
numeric_activity=NumericActivity.objects.get(pk=form.cleaned_data['numeric_activity']),
exam_activity_id=exam_activity_id,
offering=course,
position=position,
group=False,
config=config)
except NotImplementedError:
return NotFoundResponse(request)
messages.success(request, 'New activity "%s" added' % form.cleaned_data['name'])
return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': course_slug}))
else:
messages.error(request, "Please correct the error below")
else:
form = CalLetterActivityForm()
form.fields['numeric_activity'].choices = numact_choices
form.fields['exam_activity'].choices = examact_choices
context = {'course': course, 'form': form, 'letter_activities': letter_activities, 'form_type': FORMTYPE['add']}
return render(request, 'grades/cal_letter_activity_form.html', context)
@requires_course_staff_by_slug
def formula_tester(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
numeric_activities = NumericActivity.objects.filter(offering=course, deleted=False)
result = ""
if 'formula' in request.GET: # If the form has been submitted...
activity_entries = []
faked_activities = [] # used to evaluate the formula
has_error = False
for numeric_activity in numeric_activities:
activity_form_entry = ActivityFormEntry(request.GET, prefix=numeric_activity.slug)
if not activity_form_entry.is_valid():
has_error = True
else:
value = activity_form_entry.cleaned_data['value']
if not value:
value = 0
faked_activities.append(FakeActivity(numeric_activity.name, numeric_activity.short_name,
activity_form_entry.cleaned_data['status'],
numeric_activity.max_grade, numeric_activity.percent,
value))
activity_entries.append(FormulaTesterActivityEntry(numeric_activity, activity_form_entry))
formula_form_entry = FormulaFormEntry(request.GET)
formula_form_entry.activate_form_entry_validation(course_slug, None)
if not formula_form_entry.is_valid():
has_error = True
if has_error:
messages.error(request, "Please correct the error below")
else:
parsed_expr = pickle.loads(formula_form_entry.pickled_formula)
act_dict = activities_dictionary(faked_activities)
try:
result = eval_parse(parsed_expr, FakeEvalActivity(course), act_dict, None, True)
except EvalException:
                messages.error(request, "Cannot evaluate formula")
else:
activity_entries = []
for numeric_activity in numeric_activities:
activity_form_entry = ActivityFormEntry(prefix=numeric_activity.slug)
activity_entries.append(FormulaTesterActivityEntry(numeric_activity, activity_form_entry))
formula_form_entry = FormulaFormEntry()
context = {'course': course, 'activity_entries': activity_entries,
'formula_form_entry': formula_form_entry, 'result': result}
return render(request, 'grades/formula_tester.html', context)
@requires_course_staff_by_slug
def calculate_all(request, course_slug, activity_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(CalNumericActivity, slug=activity_slug, offering=course, deleted=False)
try:
        ignored, hiding_info = calculate_numeric_grade(course, activity)
        if hiding_info:
            messages.warning(request, "This activity is released to students, but the calculation uses unreleased grades. Calculations treat unreleased activities as zero to prevent leaking hidden info to students.")
        if ignored == 1:
            messages.warning(request, "Did not calculate grade for 1 manually-graded student.")
        elif ignored > 1:
            messages.warning(request, "Did not calculate grade for %i manually-graded students." % (ignored))
except ValidationError as e:
messages.error(request, e.args[0])
except EvalException as e:
messages.error(request, e.args[0])
except NotImplementedError:
return NotFoundResponse(request)
return HttpResponseRedirect(activity.get_absolute_url())
@requires_course_staff_by_slug
def calculate_all_lettergrades(request, course_slug, activity_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(CalLetterActivity, slug=activity_slug, offering=course, deleted=False)
try:
        ignored = calculate_letter_grade(course, activity)
        if ignored == 1:
            messages.warning(request, "Did not calculate letter grade for 1 manually-graded student.")
        elif ignored > 1:
            messages.warning(request, "Did not calculate letter grade for %i manually-graded students." % (ignored))
except ValidationError as e:
messages.error(request, e.args[0])
except NotImplementedError:
return NotFoundResponse(request)
return HttpResponseRedirect(activity.get_absolute_url())
@requires_course_staff_by_slug
def calculate_individual_ajax(request, course_slug, activity_slug):
"""
Ajax way to calculate individual numeric grade.
This ajav view function is called in the activity_info page.
"""
if request.method == 'POST':
userid = request.POST.get('userid')
        if userid is None:
return ForbiddenResponse(request)
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(CalNumericActivity, slug=activity_slug, offering=course, deleted=False)
member = get_object_or_404(Member, offering=course, person__userid=userid, role='STUD')
try:
displayable_result, _ = calculate_numeric_grade(course,activity, member)
except ValidationError:
return ForbiddenResponse(request)
except EvalException:
return ForbiddenResponse(request)
except NotImplementedError:
return ForbiddenResponse(request)
return HttpResponse(displayable_result)
return ForbiddenResponse(request)
def _create_activity_formdatadict(activity):
    if not any(isinstance(activity, activity_type) for activity_type in ACTIVITY_TYPES):
        return
data = dict()
data['name'] = activity.name
data['short_name'] = activity.short_name
data['status'] = activity.status
data['due_date'] = activity.due_date
data['percent'] = activity.percent
data['url'] = ''
if 'url' in activity.config:
data['url'] = activity.config['url']
data['showstats'] = True
if 'showstats' in activity.config:
data['showstats'] = activity.config['showstats']
data['showhisto'] = True
if 'showhisto' in activity.config:
data['showhisto'] = activity.config['showhisto']
if 'calculation_leak' in activity.config:
data['calculation_leak'] = activity.config['calculation_leak']
for (k, v) in list(GROUP_STATUS_MAP.items()):
if activity.group == v:
data['group'] = k
if isinstance(activity, NumericActivity):
data['max_grade'] = activity.max_grade
if isinstance(activity, CalNumericActivity):
data['formula'] = activity.formula
if isinstance(activity, CalLetterActivity):
data['numeric_activity'] = activity.numeric_activity_id
data['exam_activity'] = activity.exam_activity_id
return data
def _populate_activity_from_formdata(activity, data):
    if not any(isinstance(activity, activity_type) for activity_type in ACTIVITY_TYPES):
        return
if 'name' in data:
activity.name = data['name']
if 'short_name' in data:
activity.short_name = data['short_name']
if 'status' in data:
activity.status = data['status']
if 'due_date' in data:
activity.due_date = data['due_date']
if 'percent' in data:
activity.percent = data['percent']
if 'group' in data:
activity.group = GROUP_STATUS_MAP[data['group']]
if 'max_grade' in data:
activity.max_grade = data['max_grade']
if 'formula' in data:
activity.formula = data['formula']
if 'url' in data:
activity.config['url'] = data['url']
if 'showstats' in data:
activity.config['showstats'] = data['showstats']
if 'showhisto' in data:
activity.config['showhisto'] = data['showhisto']
if 'calculation_leak' in data:
activity.config['calculation_leak'] = data['calculation_leak']
if 'numeric_activity' in data:
activity.numeric_activity = NumericActivity.objects.get(pk=data['numeric_activity'])
if 'exam_activity' in data:
try:
activity.exam_activity = Activity.objects.get(pk=data['exam_activity'])
except Activity.DoesNotExist:
activity.exam_activity = None
def _semester_date_warning(request, activity):
"""
Generate warnings for this request if activity due date is outside semester boundaries.
"""
if not activity.due_date:
return
# don't warn for 24 hours after the last day of classes (start of last day + 48 hours)
if activity.due_date > datetime.datetime.combine(
activity.offering.semester.end, datetime.time(0,0,0)) + datetime.timedelta(hours=48):
messages.warning(request, "Activity is due after the end of the semester.")
if activity.due_date < datetime.datetime.combine(
activity.offering.semester.start, datetime.time(0,0,0)):
messages.warning(request, "Activity is due before the start of the semester.")
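def _semester_warning_cutoff_example():
    # Worked example of the 48-hour boundary above (the date is hypothetical):
    # for a semester whose last day of classes is 2024-04-12, the "due after
    # end" warning starts at combine(2024-04-12, 00:00) + 48h = 2024-04-14 00:00.
    end = datetime.date(2024, 4, 12)
    return datetime.datetime.combine(end, datetime.time(0, 0, 0)) + datetime.timedelta(hours=48)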
@requires_course_staff_by_slug
def edit_activity(request, course_slug, activity_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activities = all_activities_filter(slug=activity_slug, offering=course)
numact_choices = [(na.pk, na.name) for na in NumericActivity.objects.filter(offering=course, deleted=False)]
examact_choices = [(0, '\u2014')] + [(na.pk, na.name) for na in Activity.objects.filter(offering=course, deleted=False)]
    if len(activities) == 1:
activity = activities[0]
# extend group options
activities_list = [(None, '\u2014'),]
activities = all_activities_filter(offering=course)
for a in activities:
            if a.group and a.id != activity.id:
activities_list.append((a.slug, a.name))
from_page = request.GET.get('from_page')
if request.method == 'POST': # If the form has been submitted...
if isinstance(activity, CalNumericActivity):
form = CalNumericActivityForm(request.POST)
elif isinstance(activity, NumericActivity):
form = NumericActivityForm(request.POST, previous_activities=activities_list)
elif isinstance(activity, CalLetterActivity):
form = CalLetterActivityForm(request.POST)
form.fields['numeric_activity'].choices = numact_choices
form.fields['exam_activity'].choices = examact_choices
elif isinstance(activity, LetterActivity):
form = LetterActivityForm(request.POST, previous_activities=activities_list)
form.activate_editform_validation(course_slug, activity_slug)
if form.is_valid(): # All validation rules pass
_populate_activity_from_formdata(activity, form.cleaned_data)
                if activity.group and form.cleaned_data['extend_group'] is not None:
a2 = [i for i in activities if i.slug == form.cleaned_data['extend_group']]
if len(a2) > 0:
add_activity_to_group(activity, a2[0], course)
activity.save(entered_by=request.user.username)
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("edited %s") % (activity),
related_object=activity)
l.save()
messages.success(request, "Details of %s updated" % activity.name)
_semester_date_warning(request, activity)
if from_page == FROMPAGE['course']:
return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': course_slug}))
else:
return HttpResponseRedirect(reverse('offering:activity_info',
kwargs={'course_slug': course_slug, 'activity_slug': activity.slug}))
else:
messages.error(request, "Please correct the error below")
else:
datadict = _create_activity_formdatadict(activity)
if isinstance(activity, CalNumericActivity):
form = CalNumericActivityForm(initial=datadict)
elif isinstance(activity, NumericActivity):
form = NumericActivityForm(initial=datadict, previous_activities=activities_list)
elif isinstance(activity, CalLetterActivity):
form = CalLetterActivityForm(initial=datadict)
form.fields['numeric_activity'].choices = numact_choices
form.fields['exam_activity'].choices = examact_choices
# set initial value in form to current value
            elif isinstance(activity, LetterActivity):
                form = LetterActivityForm(initial=datadict, previous_activities=activities_list)
            # no separate CalLetterActivity case is needed here: it is a subclass
            # of LetterActivity and is already caught by the branch above
form.activate_editform_validation(course_slug, activity_slug)
if isinstance(activity, CalNumericActivity):
numeric_activities = NumericActivity.objects.exclude(slug=activity_slug).filter(offering=course, deleted=False)
context = {'course': course, 'activity': activity, 'form': form, 'numeric_activities': numeric_activities, 'form_type': FORMTYPE['edit'], 'from_page': from_page}
resp = render(request, 'grades/cal_numeric_activity_form.html', context)
resp.has_inline_script = True # insert activity in formula links
return resp
elif isinstance(activity, NumericActivity):
context = {'course': course, 'activity': activity, 'form': form, 'form_type': FORMTYPE['edit'], 'from_page': from_page}
return render(request, 'grades/numeric_activity_form.html', context)
elif isinstance(activity, CalLetterActivity):
context = {'course': course, 'activity': activity, 'form': form, 'form_type': FORMTYPE['edit'], 'from_page': from_page}
return render(request, 'grades/cal_letter_activity_form.html', context)
elif isinstance(activity, LetterActivity):
context = {'course': course, 'activity': activity, 'form': form, 'form_type': FORMTYPE['edit'], 'from_page': from_page}
return render(request, 'grades/letter_activity_form.html', context)
else:
return NotFoundResponse(request)
@requires_course_staff_by_slug
def delete_activity(request, course_slug, activity_slug):
"""
Flag activity as deleted
"""
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(Activity, slug=activity_slug, offering=course)
if request.method == 'POST':
if not Member.objects.filter(offering=course, person__userid=request.user.username, role="INST"):
# only instructors can delete
return ForbiddenResponse(request, "Only instructors can delete activities")
activity.safely_delete()
        messages.success(request, 'Activity deleted. It can be restored by the system administrator in an emergency.')
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("activity %s marked deleted") % (activity),
related_object=course)
l.save()
return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': course.slug}))
else:
return ForbiddenResponse(request)
@requires_course_staff_by_slug
def release_activity(request, course_slug, activity_slug):
"""
Bump activity status: INVI -> URLS, URLS -> RLS.
"""
course = get_object_or_404(CourseOffering, slug=course_slug)
activity = get_object_or_404(Activity, slug=activity_slug, offering=course, deleted=False)
if request.method == 'POST':
if activity.status == "INVI":
activity.status = "URLS"
activity.save(entered_by=request.user.username)
messages.success(request, 'Activity made visible to students (but grades are still unreleased).')
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("activity %s made visible") % (activity),
related_object=course)
l.save()
elif activity.status == "URLS":
activity.status = "RLS"
activity.save(entered_by=request.user.username)
messages.success(request, 'Grades released to students.')
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("activity %s grades released") % (activity),
related_object=course)
l.save()
return HttpResponseRedirect(reverse('offering:activity_info', kwargs={'course_slug': course.slug, 'activity_slug': activity.slug}))
else:
return ForbiddenResponse(request)
@requires_course_staff_by_slug
def add_letter_activity(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activities_list = [(None, '\u2014'),]
activities = all_activities_filter(course)
for a in activities:
        if a.group:
activities_list.append((a.slug, a.name))
if request.method == 'POST': # If the form has been submitted...
form = LetterActivityForm(request.POST, previous_activities=activities_list) # A form bound to the POST data
form.activate_addform_validation(course_slug)
if form.is_valid(): # All validation rules pass
aggr_dict = Activity.objects.filter(offering=course).aggregate(Max('position'))
if not aggr_dict['position__max']:
position = 1
else:
position = aggr_dict['position__max'] + 1
config = {
'showstats': form.cleaned_data['showstats'],
'showhisto': form.cleaned_data['showhisto'],
'url': form.cleaned_data['url'],
}
a = LetterActivity.objects.create(name=form.cleaned_data['name'],
short_name=form.cleaned_data['short_name'],
status=form.cleaned_data['status'],
due_date=form.cleaned_data['due_date'],
percent=form.cleaned_data['percent'],
offering=course, position=position,
group=GROUP_STATUS_MAP[form.cleaned_data['group']],
config=config)
            if a.group and form.cleaned_data['extend_group'] is not None:
a2 = [i for i in activities if i.slug == form.cleaned_data['extend_group']]
if len(a2) > 0:
add_activity_to_group(a, a2[0], course)
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("created a letter-graded activity %s") % (a),
related_object=a)
l.save()
messages.success(request, 'New activity "%s" added' % a.name)
_semester_date_warning(request, a)
return HttpResponseRedirect(reverse('offering:course_info',
kwargs={'course_slug': course_slug}))
else:
form = LetterActivityForm(previous_activities=activities_list)
context = {'course': course, 'form': form, 'form_type': FORMTYPE['add']}
return render(request, 'grades/letter_activity_form.html', context)
@requires_course_staff_by_slug
def all_grades(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
activities = all_activities_filter(offering=course)
students = Member.objects.filter(offering=course, role="STUD").select_related('person', 'offering')
# get grade data into a format we can work with
grades = {}
for a in activities:
grades[a.slug] = {}
if hasattr(a, 'numericgrade_set'):
gs = a.numericgrade_set.all().select_related('member', 'member__person')
else:
gs = a.lettergrade_set.all().select_related('member', 'member__person')
for g in gs:
grades[a.slug][g.member.person.userid] = g
context = {'course': course, 'students': students, 'activities': activities, 'grades': grades}
return render(request, 'grades/all_grades.html', context)
def _all_grades_output(response, course):
activities = all_activities_filter(offering=course)
students = Member.objects.filter(offering=course, role="STUD").select_related('person')
# get grade data into a format we can work with
labtut = course.labtut
grades = {}
for a in activities:
grades[a.slug] = {}
if hasattr(a, 'numericgrade_set'):
gs = a.numericgrade_set.all().select_related('member', 'member__person')
else:
gs = a.lettergrade_set.all().select_related('member', 'member__person')
for g in gs:
grades[a.slug][g.member.person.userid] = g
# output results
writer = csv.writer(response)
row = ['Last name', 'First name', Person.userid_header(), Person.emplid_header()]
if labtut:
row.append('Lab/Tutorial')
for a in activities:
row.append(a.short_name)
writer.writerow(row)
for s in students:
row = [s.person.last_name, s.person.first_name, s.person.userid, s.person.emplid]
if labtut:
row.append(s.labtut_section or '')
for a in activities:
try:
gr = grades[a.slug][s.person.userid]
if gr.flag=='NOGR':
g = ''
else:
if a.is_numeric():
g = gr.value
else:
g = gr.letter_grade
except KeyError:
g = ''
row.append(g)
writer.writerow(row)
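# Example of the CSV layout produced above (all names and values hypothetical):
#   Last name,First name,Userid,Emplid,Lab/Tutorial,A1,Mid
#   Doe,Jane,jdoe,301234567,D100,8.5,B+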
@requires_course_staff_by_slug
def all_grades_csv(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s.csv"' % (course_slug)
_all_grades_output(response, course)
return response
@requires_course_staff_by_slug
def grade_history(request, course_slug):
"""
Dump all GradeHistory for the offering to a CSV
"""
offering = get_object_or_404(CourseOffering, slug=course_slug)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'inline; filename="%s-history.csv"' % (course_slug,)
writer = csv.writer(response)
writer.writerow(['Date/Time', 'Activity', 'Student', 'Entered By', 'Numeric Grade', 'Letter Grade', 'Status', 'Group'])
grade_histories = GradeHistory.objects.filter(activity__offering=offering, status_change=False) \
.select_related('entered_by', 'activity', 'member__person', 'group')
for gh in grade_histories:
writer.writerow([
gh.timestamp,
gh.activity.short_name,
gh.member.person.userid_or_emplid(),
gh.entered_by.userid_or_emplid(),
gh.numeric_grade,
gh.letter_grade,
FLAGS.get(gh.grade_flag, None),
gh.group.slug if gh.group else None,
])
return response
@requires_course_staff_by_slug
def class_list(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
members = Member.objects.filter(offering=course, role="STUD").select_related('person', 'offering')
gms = GroupMember.objects.filter(confirmed=True, student__offering=course).select_related('group', 'group__courseoffering')
groups = {}
for gm in gms:
gs = groups.get(gm.student_id, set())
groups[gm.student_id] = gs
gs.add(gm.group)
rows = []
for m in members:
data = {'member': m, 'groups': groups.get(m.id, [])}
rows.append(data)
context = {'course': course, 'rows': rows}
return render(request, 'grades/class_list.html', context)
def has_photo_agreement(user):
configs = UserConfig.objects.filter(user=user, key='photo-agreement')
return bool(configs and configs[0].value['agree'])
PHOTO_LIST_STYLES = set(['table', 'horiz', 'signin'])
@requires_course_staff_by_slug
def photo_list(request, course_slug, style='horiz'):
if style not in PHOTO_LIST_STYLES:
raise Http404
user = get_object_or_404(Person, userid=request.user.username)
if not has_photo_agreement(user):
url = reverse('config:photo_agreement') + '?return=' + urllib.parse.quote(request.path)
return ForbiddenResponse(request, mark_safe('You must <a href="%s">confirm the photo usage agreement</a> before seeing student photos.' % (url)))
course = get_object_or_404(CourseOffering, slug=course_slug)
members = Member.objects.filter(offering=course, role="STUD").select_related('person', 'offering')
# fire off a task to fetch the photos and warm the cache
pre_fetch_photos(m.person.emplid for m in members)
context = {'course': course, 'members': members}
return render(request, 'grades/photo_list_%s.html' % (style), context)
@login_required
def student_photo(request, emplid):
# confirm user's photo agreement
user = get_object_or_404(Person, userid=request.user.username)
can_access = False
if Role.objects_fresh.filter(person=user, role__in=['ADVS', 'ADVM']):
can_access = True
else:
if not has_photo_agreement(user):
url = reverse('config:photo_agreement') + '?return=' + urllib.parse.quote(request.path)
return ForbiddenResponse(request, mark_safe('You must <a href="%s">confirm the photo usage agreement</a> before seeing student photos.' % (url)))
# confirm user is an instructor of this student (within the last two years)
# TODO: cache past_semester to save the query?
past_semester = Semester.get_semester(datetime.date.today() - datetime.timedelta(days=730))
student_members = Member.objects.filter(offering__semester__name__gte=past_semester.name,
person__emplid=emplid, role='STUD').select_related('offering')
student_offerings = [m.offering for m in student_members]
instructor_of = Member.objects.filter(person=user, role='INST', offering__in=student_offerings)
can_access = (instructor_of.count() > 0)
if not can_access:
return ForbiddenResponse(request, 'You must be an instructor of this student.')
# get the photo
data, status = photo_for_view(emplid)
# return the photo
response = HttpResponse(data, content_type='image/jpeg')
response.status_code = status
response['Content-Disposition'] = 'inline; filename="%s.jpg"' % (emplid)
response['Cache-Control'] = 'private, max-age=300'
response.slow_okay = True
return response
@requires_course_staff_by_slug
def new_message(request, course_slug):
offering = get_object_or_404(CourseOffering, slug=course_slug)
staff = get_object_or_404(Person, userid=request.user.username)
default_message = NewsItem(user=staff, author=staff, course=offering, source_app="dashboard")
    if request.method == 'POST':
        form = MessageForm(data=request.POST, instance=default_message)
        if form.is_valid():
NewsItem.for_members(member_kwargs={'offering': offering}, newsitem_kwargs={
'author': staff, 'course': offering, 'source_app': 'dashboard',
'title': form.cleaned_data['title'], 'content': form.cleaned_data['content'],
'url': form.cleaned_data['url'], 'markup': form.cleaned_data['_markup']})
#LOG EVENT#
l = LogEntry(userid=request.user.username,
description=("created a message for every student in %s") % (offering),
related_object=offering)
l.save()
messages.add_message(request, messages.SUCCESS, 'News item created.')
return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': offering.slug}))
else:
form = MessageForm()
    return render(request, "grades/new_message.html", {"form": form, 'course': offering})
@requires_course_staff_by_slug
def student_search(request, course_slug):
course = get_object_or_404(CourseOffering, slug=course_slug)
if request.method == 'POST':
# find the student if we can and redirect to info page
form = StudentSearchForm(request.POST)
if not form.is_valid():
messages.add_message(request, messages.ERROR, 'Invalid search')
context = {'course': course, 'form': form}
return render(request, 'grades/student_search.html', context)
search = form.cleaned_data['search']
try:
int(search)
students = Member.objects.filter(offering=course, role="STUD").filter(Q(person__userid=search) | Q(person__emplid=search))
except ValueError:
students = Member.objects.filter(offering=course, role="STUD").filter(person__userid=search)
if len(students)!=1:
if len(students)==0:
messages.add_message(request, messages.ERROR, 'No student found')
else:
messages.add_message(request, messages.ERROR, 'Multiple students found')
context = {'course': course, 'form': form}
return render(request, 'grades/student_search.html', context)
student = students[0]
return HttpResponseRedirect(reverse('offering:student_info',
kwargs={'course_slug': course_slug, 'userid': student.person.userid}))
form = StudentSearchForm()
context = {'course': course, 'form': form}
return render(request, 'grades/student_search.html', context)
@requires_course_staff_by_slug
def student_info(request, course_slug, userid):
course = get_object_or_404(CourseOffering, slug=course_slug)
member = get_object_or_404(Member, ~Q(role='DROP'), find_member(userid), offering__slug=course_slug)
requestor = get_object_or_404(Member, ~Q(role='DROP'), person__userid=request.user.username, offering__slug=course_slug)
activities = all_activities_filter(offering=course)
if member.role != "STUD":
return NotFoundResponse(request)
grade_info = []
for a in activities:
info = {'act': a}
# get grade
if hasattr(a, 'numericgrade_set'):
gs = a.numericgrade_set.filter(member=member)
else:
gs = a.lettergrade_set.filter(member=member)
if gs:
info['grade'] = gs[0]
else:
info['grade'] = None
# find most recent submission
sub_info = SubmissionInfo(student=member.person, activity=a)
info['sub'] = sub_info.have_submitted()
grade_info.append(info)
# find marking info
info['marked'] = False
if StudentActivityMark.objects.filter(activity_id=a.id, numeric_grade__member=member):
info['marked'] = True
gms = GroupMember.objects.filter(activity_id=a.id, student=member, confirmed=True)
if gms:
# in a group
gm = gms[0]
if GroupActivityMark.objects.filter(activity_id=a.id, group=gm.group):
info['marked'] = True
dishonesty_cases = []
if requestor.role in ['INST', 'APPR']:
from discipline.models import DisciplineCaseInstrStudent
dishonesty_cases = DisciplineCaseInstrStudent.objects.filter(offering=course, student=member.person)
group_memberships = GroupMember.objects.filter(student=member, activity__offering__slug=course_slug)
grade_history = GradeHistory.objects.filter(member=member, status_change=False).select_related('entered_by', 'activity', 'group', 'mark')
#grade_history = GradeHistory.objects.filter(member=member).select_related('entered_by', 'activity', 'group', 'mark')
context = {'course': course, 'member': member, 'grade_info': grade_info, 'group_memberships': group_memberships,
'grade_history': grade_history, 'dishonesty_cases': dishonesty_cases, 'can_photo': has_photo_agreement(requestor.person)}
return render(request, 'grades/student_info.html', context)
@requires_course_staff_by_slug
def export_all(request, course_slug):
"""
Export everything we can about this offering
"""
import io, tempfile, zipfile, os, json
from django.http import StreamingHttpResponse
from wsgiref.util import FileWrapper
from marking.views import _mark_export_data, _DecimalEncoder
from discuss.models import DiscussionTopic
course = get_object_or_404(CourseOffering, slug=course_slug)
handle, filename = tempfile.mkstemp('.zip')
os.close(handle)
z = zipfile.ZipFile(filename, 'w')
# add all grades CSV
allgrades = io.StringIO()
_all_grades_output(allgrades, course)
z.writestr("grades.csv", allgrades.getvalue())
allgrades.close()
# add marking data
acts = all_activities_filter(course)
for a in acts:
if ActivityComponent.objects.filter(numeric_activity_id=a.id):
markingdata = _mark_export_data(a)
markout = io.StringIO()
json.dump({'marks': markingdata}, markout, cls=_DecimalEncoder, indent=1)
z.writestr(a.slug + "-marking.json", markout.getvalue())
del markout, markingdata
# add submissions
acts = all_activities_filter(course)
for a in acts:
submission_info = SubmissionInfo.for_activity(a)
submission_info.get_all_components()
submission_info.generate_submission_contents(z, prefix=a.slug+'-submissions' + os.sep, always_summary=False)
# add discussion
if course.discussion():
topics = DiscussionTopic.objects.filter(offering=course).order_by('-pinned', '-last_activity_at')
discussion_data = [t.exportable() for t in topics]
discussout = io.StringIO()
json.dump(discussion_data, discussout, indent=1)
z.writestr("discussion.json", discussout.getvalue())
del discussion_data, discussout
# return the zip file
z.close()
zipdata = open(filename, 'rb')
response = StreamingHttpResponse(FileWrapper(zipdata), content_type='application/zip')
response['Content-Length'] = os.path.getsize(filename)
response['Content-Disposition'] = 'attachment; filename="' + course.slug + '.zip"'
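    # Note (hedged): removing the file while `zipdata` is still open is safe
    # on POSIX, where the data stays readable until the handle closes; on
    # Windows the remove can fail, which is why OSError is swallowed below.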
try:
os.remove(filename)
except OSError:
pass
return response
|
sfu-fas/coursys
|
grades/views.py
|
Python
|
gpl-3.0
| 71,329
|
__problem_title__ = "Comfortable distance"
__problem_url__ = "https://projecteuler.net/problem=364"
__problem_description__ = "There are seats in a row. people come after each other to fill the " \
"seats according to the following rules: We can verify that T(10) = " \
"61632 and T(1 000) mod 100 000 007 = 47255094. Find T(1 000 000) mod " \
"100 000 007."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
jrichte43/ProjectEuler
|
Problem-0364/solutions.py
|
Python
|
gpl-3.0
| 808
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from rest_framework import serializers
from rest_flex_fields import FlexFieldsModelSerializer
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
from easy_thumbnails.templatetags.thumbnail import thumbnail_url
from profiles.apiv2.serializers import ProfileSerializer
from ..models import (
Artist,
Label,
Release,
Media,
Playlist,
PlaylistItem,
PlaylistItemPlaylist,
)
SITE_URL = getattr(settings, "SITE_URL")
class ImageSerializer(serializers.ImageField):
def to_representation(self, instance):
if not instance:
return
return "{}{}".format(SITE_URL, thumbnail_url(instance, "thumbnail_240"))
class ArtistSerializer(
FlexFieldsModelSerializer, serializers.HyperlinkedModelSerializer
):
url = serializers.HyperlinkedIdentityField(
view_name="api:artist-detail", lookup_field="uuid"
)
ct = serializers.CharField(source="get_ct")
detail_url = serializers.URLField(source="get_absolute_url")
image = ImageSerializer(source="main_image")
class Meta:
model = Artist
depth = 1
fields = ["url", "ct", "created", "updated", "id", "detail_url", "uuid", "name", "image"]
class LabelSerializer(
FlexFieldsModelSerializer, serializers.HyperlinkedModelSerializer
):
url = serializers.HyperlinkedIdentityField(
view_name="api:label-detail", lookup_field="uuid"
)
ct = serializers.CharField(source="get_ct")
detail_url = serializers.URLField(source="get_absolute_url")
image = ImageSerializer(source="main_image")
class Meta:
model = Label
depth = 1
fields = ["url", "ct", "created", "updated", "id", "detail_url", "uuid", "name", "image"]
class MediaSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name="api:media-detail", lookup_field="uuid"
)
ct = serializers.CharField(source="get_ct")
detail_url = serializers.URLField(source="get_absolute_url")
duration = serializers.FloatField(source="master_duration")
artist = serializers.HyperlinkedRelatedField(
many=False, read_only=True, view_name="api:artist-detail", lookup_field="uuid"
)
release = serializers.HyperlinkedRelatedField(
many=False, read_only=True, view_name="api:release-detail", lookup_field="uuid"
)
artist_display = serializers.CharField(source="get_artist_display")
release_display = serializers.SerializerMethodField()
image = ImageSerializer(source="release.main_image")
def get_release_display(self, obj, **kwargs):
return obj.release.name if obj.release else None
assets = serializers.SerializerMethodField()
def get_assets(self, obj, **kwargs):
# TODO: propperly serialize assets
stream_url = reverse_lazy(
"mediaasset-format",
kwargs={"media_uuid": obj.uuid, "quality": "default", "encoding": "mp3"},
)
waveform_url = reverse_lazy(
"mediaasset-waveform", kwargs={"media_uuid": obj.uuid, "type": "w"}
)
assets = {
"stream": "{}{}".format(SITE_URL, stream_url),
"waveform": "{}{}".format(SITE_URL, waveform_url),
}
# TODO: check if this is a good idea...
# request asset generation for media
# print('request asset generation for {}'.format(obj))
# Format.objects.get_or_create_for_media(media=obj)
# Waveform.objects.get_or_create_for_media(media=obj, type=Waveform.WAVEFORM)
return assets
class Meta:
model = Media
depth = 1
fields = [
"url",
"ct",
"created",
"updated",
"id",
"detail_url",
"uuid",
"image",
"name",
"duration",
"assets",
"isrc",
"artist_display",
"release_display",
"artist",
"release",
]
class ReleaseSerializer(
FlexFieldsSerializerMixin, serializers.HyperlinkedModelSerializer
):
url = serializers.HyperlinkedIdentityField(
view_name="api:release-detail", lookup_field="uuid"
)
ct = serializers.CharField(source="get_ct")
image = ImageSerializer(source="main_image")
detail_url = serializers.URLField(source="get_absolute_url")
releasedate = serializers.CharField(source="releasedate_approx")
media = MediaSerializer(many=True, read_only=True, source="get_media")
artist_display = serializers.CharField(source="get_artist_display")
# label = serializers.HyperlinkedRelatedField(
# many=False,
# read_only=True,
# view_name='api:label-detail', lookup_field="uuid"
# )
label = LabelSerializer(
read_only=True,
)
# TODO: `items` is used for player only. find a way to unify this.
items = serializers.SerializerMethodField()
def get_items(self, obj, **kwargs):
items = []
for media in obj.get_media():
serializer = MediaSerializer(
media, context={"request": self.context["request"]}
)
items.append({"content": serializer.data})
return items
class Meta:
model = Release
depth = 1
fields = [
"url",
"ct",
"uuid",
"created",
"updated",
"id",
"detail_url",
"name",
"image",
"releasedate",
"artist_display",
"media",
"label",
# TODO: `items` is used for player only. find a way to unify this.
"items",
]
# expandable_fields = {
# 'label': (LabelSerializer, {'read_only': True})
# }
class PlaylistItemField(serializers.RelatedField):
"""
A custom field to use for the `item` generic relationship.
"""
    def to_representation(self, value):
        """
        Serialize tagged objects to a simple textual representation.
        """
        # TODO: handle `Jingle` objects here as well
        if isinstance(value, Media):
            serializer = MediaSerializer(
                value, context={"request": self.context["request"]}
            )
        else:
            raise Exception("Unexpected type of tagged object")
        return serializer.data
class PlaylistItemSerializer(serializers.ModelSerializer):
# http://www.django-rest-framework.org/api-guide/relations/#generic-relationships
content = PlaylistItemField(read_only=True, source="content_object")
class Meta:
model = PlaylistItem
depth = 1
fields = ["content"]
class PlaylistItemPlaylistSerializer(serializers.ModelSerializer):
# item = PlaylistItemSerializer(read_only=True)
content = serializers.SerializerMethodField()
    def get_content(self, obj, **kwargs):
        # TODO: implement for `Jingle`
        if isinstance(obj.item.content_object, Media):
            serializer = MediaSerializer(
                instance=Media.objects.get(pk=obj.item.content_object.pk),
                many=False,
                context={"request": self.context["request"]},
            )
        else:
            raise Exception("Unexpected type of tagged object")
        return serializer.data
class Meta:
model = PlaylistItemPlaylist
depth = 1
fields = [
# 'item',
"content",
"position",
"cue_in",
"cue_out",
"fade_in",
"fade_out",
"fade_cross",
]
class PlaylistSerializer(FlexFieldsModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name="api:playlist-detail", lookup_field="uuid"
)
ct = serializers.CharField(source="get_ct")
image = ImageSerializer(source="main_image")
detail_url = serializers.URLField(source="get_absolute_url")
items = PlaylistItemPlaylistSerializer(source="playlist_items", many=True)
tags = serializers.StringRelatedField(many=True)
    user = serializers.SerializerMethodField()
item_appearances = serializers.SerializerMethodField()
dayparts = serializers.SerializerMethodField()
    def get_user(self, obj):
        if not (obj.user and getattr(obj.user, "profile", None)):
            return
        return ProfileSerializer(obj.user.profile, context=self.context).data
def get_item_appearances(self, obj, **kwargs):
items = [
"{}:{}".format(co.content_object.get_ct(), co.content_object.uuid)
for co in obj.get_items()
]
return items
def get_dayparts(self, obj, **kwargs):
return [
{"day": dp.day, "start": dp.time_start, "end": dp.time_end}
for dp in obj.dayparts.active()
]
class Meta:
model = Playlist
depth = 1
fields = [
"url",
"ct",
"uuid",
"id",
"detail_url",
"name",
"series_display",
"image",
"tags",
"user",
"mixdown_file",
"items",
"item_appearances",
"num_media",
"duration",
"dayparts",
]
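# Hedged usage note: with rest_flex_fields, an `expandable_fields` block like
# the commented-out one in ReleaseSerializer lets clients request nested data
# on demand, e.g.
#   GET /api/v2/releases/?expand=label
# (the URL prefix is illustrative; it depends on the router configuration).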
|
hzlf/openbroadcast.org
|
website/apps/alibrary/apiv2/serializers.py
|
Python
|
gpl-3.0
| 9,901
|
from ImportDependence import *
from CustomClass import *
class CIA(AppForm):
useddf=pd.DataFrame()
Lines = []
Tags = []
description = 'Chemical Index of Alteration'
unuseful = ['Name',
'Mineral',
'Author',
'DataType',
'Label',
'Marker',
'Color',
'Size',
'Alpha',
'Style',
'Width',
'Tag']
reference = '''
    CIA = [Al2O3/(Al2O3+CaO*+Na2O+K2O)]×100
ICV = (Fe2O3+K2O+Na2O+CaO*+MgO+MnO+TiO2)/Al2O3 (Cox,1995)
PIA = {(Al2O3-K2O)/[(Al2O3-K2O)+CaO*+Na2O]}×100
CIW = [Al2O3/(Al2O3+CaO*+Na2O)]×100
CIW' = [Al2O3/(Al2O3+Na2O)]×100
where CaO* is the amount of CaO incorporated in the silicate fraction of the rock.
CaO* = CaO - (10/3 * P2O5)
if CaO* < Na2O:
CaO* = CaO*
else:
CaO* = Na2O
References:
Nesbitt-CIA-1982
Harnois-CIW-1988
Mclennan-CIA-1993
Cox R-ICV-1995
Fedo-PIA-1995
Cullers-CIW'-2000
Song B W-2013
Cox R, Lowe D R, Cullers R L. The influence of sediment recycling and basement composition on evolution of mudrock chemistry in the southwestern United States[J]. Geochimica Et Cosmochimica Acta, 1995, 59(14):2919-2940.
Harnois, L., 1988, The CIW index: A new chemical index of weathering: Sedimentary Geology, v. 55, p. 319–322. doi:10.1016/0037-0738(88)90137-6
Nesbitt, H.W., and Young, G.M., 1982, Early Proterozoic climates and plate motions inferred from major element chemistry of lutites: Nature, v. 299, p. 715–717. doi:10.1038/299715a0
'''
BaseMass = {'SiO2': 60.083,
'TiO2': 79.865,
'Al2O3': 101.960077,
'TFe2O3': 159.687,
'Fe2O3': 159.687,
'TFeO': 71.844,
'FeO': 71.844,
'MnO': 70.937044,
'MgO': 40.304,
'CaO': 56.077000000000005,
'Na2O': 61.978538560000004,
'K2O': 94.1956,
'P2O5': 141.942523996,
'CO2': 44.009,
'SO3': 80.057,
'Fe3O4': 231.531,
'BaO': 153.326,
'SrO': 103.619,
'Cr2O3': 151.98919999999998,
}
def __init__(self, parent=None, df=pd.DataFrame()):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Chemical Index of Alteration & Index of Compositional Variability')
self.items = []
self._df = df
self._df.reindex()
if (len(df) > 0):
self._changed = True
# print('DataFrame recieved to CIA')
self.raw = df
self.raw = self.CleanDataFile(df)
self.rawitems = self.raw.columns.values.tolist()
for i in self.rawitems:
if i not in self.unuseful:
self.items.append(i)
else:
pass
self.create_main_frame()
self.create_status_bar()
def create_main_frame(self):
self.resize(800,600)
self.main_frame = QWidget()
self.dpi = 128
self.setWindowTitle('Chemical Index of Alteration & Index of Compositional Variability')
self.tableView = CustomQTableView(self.main_frame)
self.tableView.setObjectName('tableView')
self.tableView.setSortingEnabled(True)
self.textbox = GrowingTextEdit(self)
self.textbox.setText(self.reference)
# Other GUI controls
self.save_button = QPushButton('&Save')
self.save_button.clicked.connect(self.saveDataFile)
#
# Layout with box sizers
#
self.hbox = QHBoxLayout()
for w in [self.save_button]:
self.hbox.addWidget(w)
self.hbox.setAlignment(w, Qt.AlignVCenter)
self.vbox = QVBoxLayout()
self.vbox.addWidget(self.tableView)
#self.vbox.addWidget(self.tableView)
self.vbox.addLayout(self.hbox)
self.vbox.addWidget(self.textbox)
self.main_frame.setLayout(self.vbox)
self.setCentralWidget(self.main_frame)
def Read(self, inpoints):
points = []
for i in inpoints:
points.append(i.split())
result = []
for i in points:
for l in range(len(i)):
a = float((i[l].split(','))[0])
a = a * self.x_scale
b = float((i[l].split(','))[1])
b = (self.height_load - b) * self.y_scale
result.append((a, b))
return (result)
def CIA(self):
self.WholeData = []
dataframe=pd.DataFrame()
dataframe = self._df
#dataframe.set_index('Label')
ItemsAvalibale = dataframe.columns.values.tolist()
Indexes = dataframe.index.values.tolist()
#ItemsToCheck = ['Label','SiO2','Al2O3','Fe2O3','MgO','CaO','Na2O','K2O','P2O5','MnO','TiO2']
ItemsToTest = ['Number', 'Tag', 'Name', 'Author', 'DataType', 'Marker', 'Color', 'Size', 'Alpha',
'Style', 'Width']
for i in ItemsAvalibale:
if 'O' not in i and i !='Label':
dataframe = dataframe.drop(i, 1)
WholeItemsAvalibale = dataframe.columns.values.tolist()
ItemsAvalibale = dataframe.columns.values.tolist()
Indexes = dataframe.index.values.tolist()
if 'Whole' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('Whole')
if 'CIA' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('CIA')
if 'ICV' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('ICV')
if 'PIA' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('PIA')
if 'CIW' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('CIW')
if 'CIW\'' not in WholeItemsAvalibale:
WholeItemsAvalibale.append('CIW\'')
        print('index', Indexes, '\ncolumns', WholeItemsAvalibale)
WholeMole=[]
WholeList=[]
dataframe = dataframe.dropna(axis=1,how='all')
print(dataframe)
for j in Indexes:
tmpList=[]
tmpMoleSum=0
tmpcia=0
tmpAl2O3=0
tmpCaO=0
tmpNa2O=0
tmpK2O=0
tmpP2O5=0
tmpFe2O3=0
tmpMgO=0
tmpMnO=0
tmpTiO2=0
#ICV =(Fe2O3+K2O+Na2O+CaO*+MgO+MnO+TiO2)/Al2O3 (Cox,1995)
for i in ItemsAvalibale:
if i in self.BaseMass:
m=dataframe.at[j,i]
n=self.BaseMass[i]
#print('\nm & n is \t',m,n)
tmpmole= m/n
#print(tmpmole)
tmpMoleSum = tmpMoleSum + tmpmole
#tmpList.append(dataframe.at[i,j])
#print('\n total mole is',tmpMoleSum)
for i in ItemsAvalibale:
if i in self.BaseMass:
tmpdata= 100*(dataframe.at[j,i]/self.BaseMass[i])/tmpMoleSum
tmpList.append(tmpdata)
#print(i, tmpdata)
if i =='Al2O3':
tmpAl2O3=tmpdata
elif i =='CaO':
tmpCaO=tmpdata
elif i =='Na2O':
tmpNa2O = tmpdata
elif i =='K2O':
tmpK2O=tmpdata
elif i =='P2O5':
tmpP2O5=tmpdata
elif i =='Fe2O3':
tmpFe2O3=tmpdata
elif i == 'MgO':
tmpMgO = tmpdata
elif i == 'MnO':
tmpMnO = tmpdata
elif i == 'TiO2':
tmpTiO2 = tmpdata
elif i == 'Label' :
tmpdata = dataframe.at[j,i]
tmpList.append(tmpdata)
elif i in WholeItemsAvalibale:
del WholeItemsAvalibale[WholeItemsAvalibale.index(i)]
tmpList.append(tmpMoleSum)
usedCaO=0
middleCaO= tmpCaO-(10/3.0*tmpP2O5)
if middleCaO< tmpNa2O:
usedCaO=middleCaO
else:
usedCaO=tmpNa2O
#print(tmpAl2O3, usedCaO, tmpK2O, tmpNa2O)
CIA=tmpAl2O3/(tmpAl2O3+usedCaO+tmpNa2O+tmpK2O)*100
tmpList.append(CIA)
ICV =(tmpFe2O3+tmpK2O+tmpNa2O+usedCaO+tmpMgO+tmpMnO+tmpTiO2)/tmpAl2O3 #(Cox,1995)
tmpList.append(ICV)
PIA = ((tmpAl2O3-tmpK2O)/(tmpAl2O3-tmpK2O+usedCaO+tmpNa2O))*100
tmpList.append(PIA)
CIW = (tmpAl2O3/(tmpAl2O3+usedCaO+tmpNa2O))*100
tmpList.append(CIW)
CIW2 = (tmpAl2O3/(tmpAl2O3+tmpNa2O))*100
tmpList.append(CIW2)
'''
        CIA = [Al2O3/(Al2O3+CaO*+Na2O+K2O)]×100
ICV = (Fe2O3+K2O+Na2O+CaO*+MgO+MnO+TiO2)/Al2O3 (Cox,1995)
PIA = {(Al2O3-K2O)/[(Al2O3-K2O)+CaO*+Na2O]}×100
CIW = [Al2O3/(Al2O3+CaO*+Na2O)]×100
CIW' = [Al2O3/(Al2O3+Na2O)]×100
'''
#print(len(tmpList))
WholeList.append(tmpList)
pass
print(len(WholeList))
print(len(WholeItemsAvalibale))
df = pd.DataFrame(WholeList,columns=WholeItemsAvalibale)
self.useddf = df
self.tableView.setModel(PandasModel(self.useddf))
self.show()
def saveDataFile(self):
# if self.model._changed == True:
# print('changed')
# print(self.model._df)
        DataFileOutput, ok2 = QFileDialog.getSaveFileName(self,
                                                          'Save File',
                                                          'C:/',
                                                          'Excel Files (*.xlsx);;CSV Files (*.csv)')  # save the output data file
if (DataFileOutput != ''):
if ('csv' in DataFileOutput):
self.useddf.to_csv(DataFileOutput, sep=',', encoding='utf-8')
elif ('xls' in DataFileOutput):
self.useddf.to_excel(DataFileOutput, encoding='utf-8')
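# --- Illustrative sketch (not part of the original tool) ---
# A standalone restatement of the index formulas from the `reference`
# docstring above, for a single made-up wt% composition; molar masses are
# copied from the BaseMass table.
def weathering_indices_demo():
    mass = {'Al2O3': 101.960077, 'CaO': 56.077, 'Na2O': 61.97853856,
            'K2O': 94.1956, 'P2O5': 141.942523996}
    wt = {'Al2O3': 15.0, 'CaO': 3.0, 'Na2O': 3.5, 'K2O': 2.5, 'P2O5': 0.2}
    mole = {k: wt[k] / mass[k] for k in wt}  # wt% -> moles (normalisation cancels in the ratios)
    cao_star = mole['CaO'] - (10 / 3.0) * mole['P2O5']  # silicate-fraction CaO
    cao_star = min(cao_star, mole['Na2O'])  # cap CaO* at Na2O, per the reference above
    al, na, k = mole['Al2O3'], mole['Na2O'], mole['K2O']
    return {'CIA': al / (al + cao_star + na + k) * 100,
            'PIA': (al - k) / ((al - k) + cao_star + na) * 100,
            'CIW': al / (al + cao_star + na) * 100}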
|
chinageology/GeoPython
|
geopytool/CIA.py
|
Python
|
gpl-3.0
| 10,636
|
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import os
import email
import tempfile
import re
from email.header import Header
import email.charset as charset
charset.add_charset('utf-8', charset.QP, charset.QP, 'utf-8')
from email.iterators import typed_subpart_iterator
import logging
import mailcap
from cStringIO import StringIO
import alot.crypto as crypto
import alot.helper as helper
from alot.errors import GPGProblem
from alot.settings import settings
from alot.helper import string_sanitize
from alot.helper import string_decode
from alot.helper import parse_mailcap_nametemplate
from alot.helper import split_commandstring
X_SIGNATURE_VALID_HEADER = 'X-Alot-OpenPGP-Signature-Valid'
X_SIGNATURE_MESSAGE_HEADER = 'X-Alot-OpenPGP-Signature-Message'
def add_signature_headers(mail, sigs, error_msg):
'''Add pseudo headers to the mail indicating whether the signature
verification was successful.
:param mail: :class:`email.message.Message` the message to entitle
:param sigs: list of :class:`gpgme.Signature`
:param error_msg: `str` containing an error message, the empty
string indicating no error
'''
sig_from = ''
if len(sigs) == 0:
error_msg = error_msg or 'no signature found'
else:
        try:
            sig_from = crypto.get_key(sigs[0].fpr).uids[0].uid
        except Exception:
            sig_from = sigs[0].fpr
mail.add_header(
X_SIGNATURE_VALID_HEADER,
'False' if error_msg else 'True',
)
mail.add_header(
X_SIGNATURE_MESSAGE_HEADER,
u'Invalid: {0}'.format(error_msg)
if error_msg else
u'Valid: {0}'.format(sig_from),
)
def get_params(mail, failobj=list(), header='content-type', unquote=True):
'''Get Content-Type parameters as dict.
RFC 2045 specifies that parameter names are case-insensitive, so
we normalize them here.
:param mail: :class:`email.message.Message`
:param failobj: object to return if no such header is found
:param header: the header to search for parameters, default
:param unquote: unquote the values
:returns: a `dict` containing the parameters
'''
return {k.lower(): v for k, v in mail.get_params(failobj, header, unquote)}
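# Hedged example: for a part whose header reads
#   Content-Type: multipart/signed; protocol="application/pgp-signature"
# get_params(mail) returns something like
#   {'multipart/signed': '', 'protocol': 'application/pgp-signature'}
# (the content type itself appears as a key with an empty value).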
def message_from_file(handle):
'''Reads a mail from the given file-like object and returns an email
object, very much like email.message_from_file. In addition to
that OpenPGP encrypted data is detected and decrypted. If this
succeeds, any mime messages found in the recovered plaintext
message are added to the returned message object.
:param handle: a file-like object
:returns: :class:`email.message.Message` possibly augmented with
decrypted data
'''
m = email.message_from_file(handle)
    # make sure no one smuggles a token in (data from m is untrusted)
del m[X_SIGNATURE_VALID_HEADER]
del m[X_SIGNATURE_MESSAGE_HEADER]
p = get_params(m)
app_pgp_sig = 'application/pgp-signature'
app_pgp_enc = 'application/pgp-encrypted'
# handle OpenPGP signed data
if (m.is_multipart() and
m.get_content_subtype() == 'signed' and
p.get('protocol', None) == app_pgp_sig):
# RFC 3156 is quite strict:
# * exactly two messages
# * the second is of type 'application/pgp-signature'
# * the second contains the detached signature
malformed = False
if len(m.get_payload()) != 2:
malformed = u'expected exactly two messages, got {0}'.format(
len(m.get_payload()))
ct = m.get_payload(1).get_content_type()
if ct != app_pgp_sig:
malformed = u'expected Content-Type: {0}, got: {1}'.format(
app_pgp_sig, ct)
# TODO: RFC 3156 says the alg has to be lower case, but I've
# seen a message with 'PGP-'. maybe we should be more
# permissive here, or maybe not, this is crypto stuff...
if not p.get('micalg', 'nothing').startswith('pgp-'):
malformed = u'expected micalg=pgp-..., got: {0}'.format(
p.get('micalg', 'nothing'))
sigs = []
if not malformed:
try:
sigs = crypto.verify_detached(m.get_payload(0).as_string(),
m.get_payload(1).get_payload())
except GPGProblem as e:
malformed = unicode(e)
add_signature_headers(m, sigs, malformed)
# handle OpenPGP encrypted data
elif (m.is_multipart() and
m.get_content_subtype() == 'encrypted' and
p.get('protocol', None) == app_pgp_enc and
'Version: 1' in m.get_payload(0).get_payload()):
# RFC 3156 is quite strict:
# * exactly two messages
# * the first is of type 'application/pgp-encrypted'
# * the first contains 'Version: 1'
# * the second is of type 'application/octet-stream'
# * the second contains the encrypted and possibly signed data
malformed = False
ct = m.get_payload(0).get_content_type()
if ct != app_pgp_enc:
malformed = u'expected Content-Type: {0}, got: {1}'.format(
app_pgp_enc, ct)
want = 'application/octet-stream'
ct = m.get_payload(1).get_content_type()
if ct != want:
malformed = u'expected Content-Type: {0}, got: {1}'.format(want,
ct)
if not malformed:
try:
sigs, d = crypto.decrypt_verify(m.get_payload(1).get_payload())
except GPGProblem as e:
# signature verification failures end up here too if
# the combined method is used, currently this prevents
# the interpretation of the recovered plain text
# mail. maybe that's a feature.
malformed = unicode(e)
else:
# parse decrypted message
n = message_from_string(d)
# add the decrypted message to m. note that n contains
# all the attachments, no need to walk over n here.
m.attach(n)
# add any defects found
m.defects.extend(n.defects)
# there are two methods for both signed and encrypted
# data, one is called 'RFC 1847 Encapsulation' by
# RFC 3156, and one is the 'Combined method'.
if len(sigs) == 0:
# 'RFC 1847 Encapsulation', the signature is a
# detached signature found in the recovered mime
# message of type multipart/signed.
if X_SIGNATURE_VALID_HEADER in n:
for k in (X_SIGNATURE_VALID_HEADER,
X_SIGNATURE_MESSAGE_HEADER):
m[k] = n[k]
else:
# an encrypted message without signatures
# should arouse some suspicion, better warn
# the user
add_signature_headers(m, [], 'no signature found')
else:
# 'Combined method', the signatures are returned
# by the decrypt_verify function.
# note that if we reached this point, we know the
# signatures are valid. if they were not valid,
# the else block of the current try would not have
# been executed
add_signature_headers(m, sigs, '')
if malformed:
msg = u'Malformed OpenPGP message: {0}'.format(malformed)
content = email.message_from_string(msg.encode('utf-8'))
content.set_charset('utf-8')
m.attach(content)
return m
def message_from_string(s):
'''Reads a mail from the given string. This is the equivalent of
:func:`email.message_from_string` which does nothing but to wrap
the given string in a StringIO object and to call
:func:`email.message_from_file`.
Please refer to the documentation of :func:`message_from_file` for
details.
'''
return message_from_file(StringIO(s))
def extract_headers(mail, headers=None):
"""
returns subset of this messages headers as human-readable format:
all header values are decoded, the resulting string has
one line "KEY: VALUE" for each requested header present in the mail.
:param mail: the mail to use
:type mail: :class:`email.Message`
:param headers: headers to extract
:type headers: list of str
"""
headertext = u''
if headers is None:
headers = mail.keys()
for key in headers:
value = u''
if key in mail:
value = decode_header(mail.get(key, ''))
headertext += '%s: %s\n' % (key, value)
return headertext
def extract_body(mail, types=None):
"""
    returns a body text string for given mail.
    If `types` is `None`, only text/* parts are considered; the preferred
    subtype is chosen by the `prefer_plaintext` config option (text/plain
    when set, text/html otherwise).
:param mail: the mail to use
:type mail: :class:`email.Message`
:param types: mime content types to use for body string
:type types: list of str
"""
preferred = 'text/plain' if settings.get(
'prefer_plaintext') else 'text/html'
has_preferred = False
# see if the mail has our preferred type
if types is None:
has_preferred = list(typed_subpart_iterator(
mail, *preferred.split('/')))
body_parts = []
for part in mail.walk():
ctype = part.get_content_type()
if types is not None:
if ctype not in types:
continue
cd = part.get('Content-Disposition', '')
if cd.startswith('attachment'):
continue
# if the mail has our preferred type, we only keep this type
# note that if types != None, has_preferred always stays False
if has_preferred and ctype != preferred:
continue
enc = part.get_content_charset() or 'ascii'
raw_payload = part.get_payload(decode=True)
        if ctype == 'text/plain':
raw_payload = string_decode(raw_payload, enc)
body_parts.append(string_sanitize(raw_payload))
else:
# get mime handler
key = 'copiousoutput'
handler, entry = settings.mailcap_find_match(ctype, key=key)
tempfile_name = None
stdin = None
if entry:
handler_raw_commandstring = entry['view']
# in case the mailcap defined command contains no '%s',
# we pipe the files content to the handling command via stdin
if '%s' in handler_raw_commandstring:
# open tempfile, respect mailcaps nametemplate
nametemplate = entry.get('nametemplate', '%s')
prefix, suffix = parse_mailcap_nametemplate(nametemplate)
tmpfile = tempfile.NamedTemporaryFile(delete=False,
prefix=prefix,
suffix=suffix)
# write payload to tmpfile
tmpfile.write(raw_payload)
tmpfile.close()
tempfile_name = tmpfile.name
else:
stdin = raw_payload
# read parameter, create handler command
parms = tuple(map('='.join, part.get_params()))
# create and call external command
cmd = mailcap.subst(entry['view'], ctype,
filename=tempfile_name, plist=parms)
logging.debug('command: %s' % cmd)
logging.debug('parms: %s' % str(parms))
cmdlist = split_commandstring(cmd)
# call handler
rendered_payload, errmsg, retval = helper.call_cmd(
cmdlist, stdin=stdin)
# remove tempfile
if tempfile_name:
os.unlink(tempfile_name)
if rendered_payload: # handler had output
body_parts.append(string_sanitize(rendered_payload))
return u'\n\n'.join(body_parts)
def decode_header(header, normalize=False):
"""
decode a header value to a unicode string
values are usually a mixture of different substrings
encoded in quoted printable using different encodings.
This turns it into a single unicode string
:param header: the header value
:type header: str
:param normalize: replace trailing spaces after newlines
:type normalize: bool
:rtype: unicode
"""
    # if the value isn't pure ascii (as RFC 2822 prescribes),
    # we just return the decoded unicode string as is
value = string_decode(header) # convert to unicode
try:
value = value.encode('ascii')
except UnicodeEncodeError:
return value
# some mailers send out incorrectly escaped headers
# and double quote the escaped realname part again. remove those
# RFC: 2047
regex = r'"(=\?.+?\?.+?\?[^ ?]+\?=)"'
value = re.sub(regex, r'\1', value)
logging.debug("unquoted header: |%s|", value)
# otherwise we interpret RFC2822 encoding escape sequences
valuelist = email.header.decode_header(value)
decoded_list = []
for v, enc in valuelist:
v = string_decode(v, enc)
decoded_list.append(string_sanitize(v))
value = u' '.join(decoded_list)
if normalize:
value = re.sub(r'\n\s+', r' ', value)
return value
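# Hedged example: a mixed RFC 2047 header decodes to one unicode string, e.g.
#   decode_header('=?utf-8?q?Gr=C3=BC=C3=9Fe?= <x@example.org>')
#   -> u'Grüße <x@example.org>'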
def encode_header(key, value):
"""
encodes a unicode string as a valid header value
:param key: the header field this value will be stored in
:type key: str
:param value: the value to be encoded
:type value: unicode
"""
# handle list of "realname <email>" entries separately
if key.lower() in ['from', 'to', 'cc', 'bcc']:
rawentries = value.split(',')
encodedentries = []
for entry in rawentries:
            m = re.search(r'\s*(.*)\s+<(.*\@.*\.\w*)>\s*$', entry)
if m: # If a realname part is contained
name, address = m.groups()
# try to encode as ascii, if that fails, revert to utf-8
# name must be a unicode string here
namepart = Header(name)
# append address part encoded as ascii
entry = '%s <%s>' % (namepart.encode(), address)
encodedentries.append(entry)
value = Header(', '.join(encodedentries))
else:
value = Header(value)
return value
def is_subdir_of(subpath, superpath):
# make both absolute
superpath = os.path.realpath(superpath)
subpath = os.path.realpath(subpath)
# return true, if the common prefix of both is equal to directory
# e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b
return os.path.commonprefix([subpath, superpath]) == superpath
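# Hedged caveat: os.path.commonprefix works character-wise, so
# is_subdir_of('/a/bc', '/a/b') returns True even though /a/bc is not under
# /a/b; appending os.sep to superpath before comparing would tighten this.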
|
a-sk/alot
|
alot/db/utils.py
|
Python
|
gpl-3.0
| 15,380
|
import os #for OS program calls
import sys #For Clean sys.exit command
import time #for sleep/pause
import RPi.GPIO as io #read the GPIO pins
io.setmode(io.BCM)
pir_pin = 17
screen_saver = False
io.setup(pir_pin, io.IN)
while True:
    if screen_saver:
        # poll the PIR sensor and wake the display on motion
        if io.input(pir_pin):
            os.system("xscreensaver-command -deactivate")
            screen_saver = False
        time.sleep(0.5)  # brief pause so the poll loop does not spin the CPU
    else:
        # leave the display on for 5 minutes, then blank it again
        time.sleep(300)
        os.system("xscreensaver-command -activate")
        screen_saver = True
|
benekex2/smart_mirror
|
motiondetect.py
|
Python
|
gpl-3.0
| 449
|
# Copyright (c) 2011 - Rui Batista <ruiandrebatista@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import functools
import inspect
import sys
import pygame
def key_event(*keys):
def wrap(f):
f.__key_events__ = keys
return f
return wrap
class _KeyHandlerMeta(type):
def __new__(cls, name, bases, dct):
        if '__key_handlers__' not in dct:
dct['__key_handlers__'] = {}
for v in dct.values():
if hasattr(v, '__key_events__') and callable(v):
for e in v.__key_events__:
dct['__key_handlers__'][e] = v
return type.__new__(cls, name, bases, dct)
class PygameMainLoop(object):
__metaclass__ = _KeyHandlerMeta
def __init__(self):
self._mainloop_running = False
self._retval = None
def run(self):
self.on_run()
self._mainloop_running = True
while self._mainloop_running:
self.run_before()
for event in self.get_events():
self.dispatch_event(event)
self.run_after()
return self._retval
def quit(self, retval=None):
self._retval = retval
self._mainloop_running = False
def dispatch_event(self, event):
if event.type == pygame.QUIT:
self.on_quit_event()
elif event.type == pygame.KEYDOWN and event.key in self.__key_handlers__:
self.__key_handlers__[event.key](self,event)
else:
self.on_event_default(event)
def on_quit_event(self):
pygame.quit()
sys.exit(0)
def get_events(self):
return pygame.event.get()
def run_before(self):
pass
def run_after(self):
pass
def on_run(self):
pass
def on_event_default(self, event):
pass
class VoiceDialog(PygameMainLoop):
@key_event(pygame.K_ESCAPE)
def escape(self, event):
self.quit(None)
def get_events(self):
return [pygame.event.wait()]
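# Hedged usage sketch (hypothetical subclass, not part of this module):
#
# class ConfirmDialog(VoiceDialog):
#     @key_event(pygame.K_RETURN)
#     def confirm(self, event):
#         self.quit(True)
#
# ConfirmDialog().run() blocks on pygame.event.wait() and returns True once
# RETURN is pressed, or None on ESCAPE (handled by VoiceDialog.escape).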
|
ragb/sudoaudio
|
sudoaudio/core.py
|
Python
|
gpl-3.0
| 2,629
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Code128 Barcode Detection & Analysis
(c) Charles Shiflett 2011
Finds Code128 barcodes in documents scanned in Grayscale at 300 dpi.
Usage:
Each page of the PDF must be converted to a grayscale PNG image, and should
be ordered as follows:
1001/1001-001.png
1001/1001-002.png
1001/1001-003.png
.
.
.
1099/1099-001.png
1099/1099-002.png
This program will find & enhance barcodes in those pages, and save its
progress to a file of the same name, except with an extension of barcode.png.
"""
DEBUG=False
from PIL import Image
from PIL import ImageOps
import PIL.ImageDraw as draw
from glob import glob
import os
import re
import pdb
import sys
import numpy
import scipy.signal as ss
import math
import scipy.ndimage.interpolation
import scipy.weave
import logging
log = logging.getLogger('findBarcodes')
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
import filter
unAliasFilter = numpy.array( [ [ 0, 1, 0], [1, 4, 1], [ 0, 1, 0] ], numpy.int )
if DEBUG:
def debugger(type, value, tb):
pdb.pm()
sys.excepthook = debugger
sys.setrecursionlimit(32768)
filWidth= 102 # / 25
filHeight= 110 # / 30
def calcBarLength(length):
if length < 6:
return 1
elif length < 10:
return 2
elif length < 13:
return 3
else:
return 4
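# Hedged example: pixel-run lengths map to Code128 module widths, e.g.
#   calcBarLength(4)  == 1    calcBarLength(7)  == 2
#   calcBarLength(12) == 3    calcBarLength(15) == 4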
def convolve( im, filt, reshape ):
height, stride = im.shape
fh,fw = filt.shape
im = im.reshape( height * stride )
filt = filt.reshape( fh*fw )
newIm = numpy.zeros ( (height * stride), numpy.int )
code = """
int sum=0, pos;
int ys=0, fys=0;
for (int y=0; y < (height-(fh/2)); y++) {
for (int x=0; x < (stride-(fw/2)); x++) {
fys=sum=0;
pos=ys+x;
int th = ((height-y) < fh ) ? height-y : fh;
int tw = ((stride-x) < fw ) ? stride-x : fw;
for (int fy=0; fy < th; fy++) {
for (int fx=0; fx < tw; fx++) {
sum+=im[pos+fx]*filt[fys+fx];
}
fys+=fw;
pos+=stride;
}
newIm[ys+x] = sum;
}
ys+=stride;
}
"""
scipy.weave.inline(code,['height','stride','fh','fw','im','filt','newIm'])
if reshape:
return newIm.reshape(height,stride )
else:
return newIm
class barImage (object):
def __init__ ( self, im ):
self.im = numpy.array ( im.getdata() )
self.stride, self.height = im.size
self.im = self.im.reshape(self.height,self.stride)
# Note: im is indexed as [y][x] not...
def printImg( self, l=[], offset=0):
l = [ (i[1], i[2]) for i in l ]
print l
for y in range( 0, self.height-1):
output = []
for x in range( 5+offset, self.stride-1):
if x > 115+offset:
continue
i = self.im[y][x]
if (x,y) in l:
output.append("B")
elif i < 20:
output.append(".")
elif i < 64:
output.append("+")
elif i < 128:
output.append("*")
elif i < 196:
output.append("x")
else:
output.append("X")
print "%03d" % y, "".join(output)
print " 56789 123456789 123456789 123456789 123456789 123456789 123456789 123456789 123456789 123456789"
def applyFilter ( self, f, reshape=True ):
value = 0
filt = getattr( self, f, False)
if type(filt) == type(False):
filt = numpy.array( getattr(filter, f, False), dtype=numpy.int )
setattr( self, f, filt )
if type(filt) == type(False):
raise ValueError("Error: filter %s was not found in filter.py" % f)
return convolve( self.im, filt, reshape )
def findBarcode( self ):
results = self.applyFilter("scaledFilter", reshape=False)
    barcodes = [ (x[1], int(x[0] % self.stride), int(x[0] / self.stride)) for x in enumerate(results) if x[1] > 1000 ]
    barcodes.sort(reverse=True)  # strongest filter responses first
    return barcodes[0:20]
def unAlias(s):
"Remove dithering. "
#s.im= ss.convolve2d( s.im, unAliasFilter, mode="same" )
s.im=convolve( s.im, unAliasFilter, reshape=True )
s.im=numpy.piecewise(s.im, [ s.im > 1000 ], [255, 0])
return
""" Convolve operator does the following:
for y in range(1, s.height-1):
for x in range(1, s.stride-1):
if s.im[y][x-1] == s.im[y][x+1] == s.im[y+1][x] == s.im[y-1][x]:
s.im[y][x] = s.im[y][x+1]
return
"""
def bw( self, whitePoint=64):
self.im=numpy.piecewise(self.im, [self.im < whitePoint, self.im >= whitePoint], [255, 0])
#self.im=self.vApplyBW( self.im, whitePoint )
def virtualLine(self, x1, y1, x2, y2, ox=0, oy=0):
totalLength = math.sqrt(math.pow(x2-x1,2) + math.pow(y2-y1,2))
if totalLength < 300:
return []
if x1 < x2:
sx,sy,ex,ey=(x1,y1,x2,y2)
else:
sx,sy,ex,ey=(x2,y2,x1,y1)
xgain = float(ex-sx)/totalLength
ygain = float(ey-sy)/totalLength
if ex - sx < 150:
# Skip vertical codes, save them for the next run.
return []
if sx < 1 or (ex+ox) >= self.stride or sx > self.stride:
return []
if not (1< sy <self.height) or not (1< sy+ygain*totalLength <self.height):
return []
#slope = float(h2-h1)/(w2-w1)
newLine = numpy.zeros( shape=(totalLength), dtype=int )
code = """
float x=sx, y=sy;
for ( int i=1; i < int(totalLength); i++ ) {
int top = stride*int(y) + int(x),
bot = stride*int(y+1) + int(x);
float xr = x-int(x),
xl = 1-xr,
yt = y-int(y),
yb = 1-yt;
newLine[i]= im[top]*xr*yt +
im[top-1]*xl*yt +
im[bot]*xr*yb +
im[bot-1]*xl*yb;
x+=xgain;
y+=ygain;
}
"""
stride, im = self.stride, self.im
scipy.weave.inline(code,['im', 'stride', \
'newLine', 'totalLength', 'ygain', 'xgain', 'sx', 'sy'])
if DEBUG:
log.debug( "".join(
[ chr( 0x2e + int(x/6.07142857142857142857) ) for x in list(newLine) ] ) )
return newLine
def checkLineCharacteristics( self, line ):
whiteCount= blackCount= 0
if 300 < len(line) < 475:
for i in line:
if int(i) < 128:
whiteCount+=1
else:
blackCount+=1
if whiteCount >= 18:
return False
if blackCount > 1:
whiteCount=0
blackCount=0
else:
return False
return True
def getValidPoint ( self, point, possible ):
for endpoint in possible:
#print point, endpoint
found = True
for i in range ( 8, 50, 10 ):
if not found:
continue
#print point, endpoint, i
line = self.virtualLine(point[0]+2, point[1]+i, endpoint[0], endpoint[1]+i)
if not self.checkLineCharacteristics(line):
found = False
#print "False"
#print "True"
if found:
return endpoint
return False
def getValidPair ( self, l, r ):
"""Returns the first pair that is a barcode and is located at the top
edges of a barcode. """
if not l or not r:
return False
l.sort( key=lambda x: x[1] )
r.sort( key=lambda x: x[1] )
if l[0][1] > r[0][1]:
r.sort( key=lambda x: x[0], reverse=True )
res = self.getValidPoint( l[0], r )
if not res:
return self.getValidPair( l[1:], r)
return l[0], res
else:
l.sort( key=lambda x: x[0], reverse=False )
res = self.getValidPoint( r[0], l )
if not res:
return self.getValidPair( l, r[1:] )
return res, r[0]
def removeNeighbors ( self, l, rev ):
l.sort( key= lambda x: x[0], reverse=rev )
restart = False
sizeOfArray = len(l)-1
for i in range (1, sizeOfArray):
for j in range(i, sizeOfArray):
if abs( l[i-1][1] - l[j][1] ) < 5:
restart = True
l[j] = False
    if restart:
return self.removeNeighbors ([ x for x in l if x], rev)
return l
def getCode ( self, barcode ):
"""
Return a single code from a code 128 barcode.
"""
code=[]
start = False
trend = 1
for pos, c in enumerate(barcode):
if (pos+1) >= len(barcode):
continue
if not start:
if c > int(10*250): # Ignore leading white space
start=True
level = barcode[pos+1]
code.append(pos)
continue
if abs(level - c) > 1250 and abs(level-barcode[pos+1]) > 1250:
if (trend<0 and (level-c)>0) or (trend>0 and (level-c)<0):
# Trend is in the same direction we are going, ignore.
continue
code.append(pos)
if trend > 0:
trend=-1
else:
trend=1
level = c
if trend > 0:
level = max(c, level)
else:
level = min(c, level)
if len(code) >= 7:
return code, barcode[pos:]
return False
def applyHeuristics ( self, barcode=[5,] ):
"""
Try to determine the numerical values of barcode image.
@barcode: list to prepend to output. (defaults to [5,])
    @return: barcode weights (i.e. 211214... prepended with pre);
        note: the decoding loop is unfinished and currently always returns False
"""
rotated = numpy.rot90(self.im, 3)
values = [ int(sum( list(line)[:30] )) for line in rotated ]
characters=[]
codes=True
while (codes):
codes = self.getCode(values)
if codes:
if DEBUG:
print codes[0][0], codes[0][-1]
print "".join([ "%c" % int(v/255+0x5f) for v in values[codes[0][0]:codes[0][-1]] ])
print codes[0]
characters.append(values[codes[0][0]:codes[0][-1]])
values=codes[1]
return False
def findBarcodeLimits( self, barType ):
    """
    find the edges of a barcode.
    @return: left and upper-right corner or right & upper-left corner of barcode
    """
    #origImg = self.im
filterName = "%sEdgeFilter%s" % ("left", "Hard")
result = self.applyFilter(filterName, reshape=False)
leftSide, rightSide = [], []
lSideLim, rSideLim = (self.stride / 2), ((self.stride/2)+1)
h,w=self.height,self.stride
filterCutoff = 18000
lx = numpy.zeros( len(result), numpy.int )
ly = numpy.zeros( len(result), numpy.int )
ry = numpy.zeros( len(result), numpy.int )
rx = numpy.zeros( len(result), numpy.int )
rets = numpy.zeros ( 2, numpy.int )
l,r = 0,0
filterlen= len(result)
code = """
int l=0, r=0; /* This code is surprisingly slow in python */
for (int i=0; i < filterlen; i++) {
if (result[i] < filterCutoff)
continue;
if (i%w < lSideLim) {
ly[l] = i/w;
lx[l++] = i%w;
}
if (i%w > rSideLim) {
ry[r] = i/w;
rx[r++] = i%w;
}
rets[0] = l;
rets[1] = r;
}
"""
scipy.weave.inline(code,['lx','rx','ry','ly','filterCutoff','filterlen','result', 'w', 'rSideLim', 'lSideLim','rets'])
rx = rx[:rets[1]]
lx = lx[:rets[0]]
leftSide = zip(lx, ly)
rightSide= zip(rx, ry)
# We need to check the lists we generated to make sure we really have
# the furthest block for a specific height range... We don't want to
# be affected by artifacting which results in minor height variation.
leftSide.sort (key = lambda x: x[0] )
rightSide.sort(key = lambda x: x[0] )
leftSide = self.removeNeighbors( leftSide, False )
#print "LEFT: ", leftSide
#print "RIGHT: ", rightSide
validPair = self.getValidPair ( leftSide, rightSide )
if not validPair:
return False
return ( (validPair[0][0]+2,validPair[0][1]+2), (validPair[1][0]+8, validPair[1][1]+2) )
hh=0
def straightenBarcode( im, filterName="Soft", prefix="" ):
global hh, newImage
hh+=1
# Find the barcode, and straighten it.
im.bw()
im.unAlias()
limits = im.findBarcodeLimits(filterName)
if limits:
if DEBUG:
newImage.putdata(im.im.reshape(im.stride*im.height))
newImage = ImageOps.invert(newImage)
d = draw.Draw(newImage)
d.line((limits[0][0], limits[0][1], limits[1][0], limits[1][1]), fill=0)
newImage.save("%s.barcode.line.%05d.png" % (prefix, hh) )
angle= ( float(limits[1][1] - limits[0][1]) /
float(limits[1][0] - limits[0][0]) )
angle= numpy.arctan(angle) * (180/math.pi)
else:
return False
im.im = scipy.ndimage.interpolation.rotate( im.im, angle, reshape=False )
return True
def createBarcode( ar, nb ):
ar=numpy.rot90(ar, 3)
b,pos=1,0
lastColor=False
if not nb:
return
for bars in nb:
if b % 2:
fill=255
else:
fill=0
b+=1
if pos > len(ar)-16:
continue
for i in range(0, bars*3):
ar[pos].fill(fill)
pos+=1
for i in range(pos, len(ar)):
ar[pos].fill(255)
pos+=1
return numpy.rot90(ar)
def doPostBarcodeAnalysis(image, prefix):
image.save("%s.barcode.post.png" % prefix )
bar = barImage( image )
bar.bw()
nb = bar.applyHeuristics()
bar.im = createBarcode( bar.im, nb )
image.putdata(bar.im.reshape(bar.stride*bar.height))
#image = image.crop((1,2,450,88))
image.save("%s.barcode.heur.png" % prefix )
newImage = False
def startRecognition( infile, rotation=False, z=0 ):
global newImage
prefix = infile[:8]
im = Image.open(infile)
if rotation:
im = im.rotate(rotation)
width, height = im.size
resized = im.resize( ( width/25, height/30 ), Image.BICUBIC )
resized = ImageOps.invert(resized)
imgData = barImage( resized )
foundBarcode, newImage, newBar = False, False, False
for probable_barcode in imgData.findBarcode():
z+=1
# Try the first 20 barcodes, and see if one of them is legit.
if foundBarcode:
continue
try:
x1, y1 = (probable_barcode[1]-3)*25, (probable_barcode[2]) * 30
x2, y2 = x1+635, y1+265
if x2 > im.size[0] or y2 > im.size[1]:
x2,y2 = im.size[0], im.size[1]
x1,y1 = im.size[0]-800, im.size[1]-265
newImage = im.crop((x1,y1,x2,y2))
newBar = barImage(newImage)
foundBarcode = straightenBarcode ( newBar, "Hard", prefix=prefix )
if DEBUG and not foundBarcode:
smoo = im.crop( (x1,y1,x2,y2) )
smoo.save("%s.fail.%03d.barcode.png" % (prefix, z) )
print "Z: ", z
except:
foundBarcode = False
raise
if foundBarcode:
log.info("Found barcode for %s." % prefix )
newImage.putdata(newBar.im.reshape(newBar.stride*newBar.height))
newImage = ImageOps.invert(newImage)
newImage.save("%s.barcode.pre.png" % prefix )
try:
(x1, y1),(x2,y2) = newBar.findBarcodeLimits("Hard")
doPostBarcodeAnalysis(newImage.crop((x1-40,y1+1,x1+520,y1+90)), prefix )
except:
pass
elif not rotation:
startRecognition( infile, rotation=90, z=z )
else:
log.info("No barcode found for %s.", prefix)
validImage = re.compile('[0-9]{4}-[0-9]{3}.png')
didCommandLine = False
for infile in sys.argv:
if validImage.match(infile):
didCommandLine = True
startRecognition( infile )
if not didCommandLine:
for infile in glob("????-???.png"):
startRecognition( infile )
|
cbears/octoform
|
ocr/findBarcode.py
|
Python
|
gpl-3.0
| 15,096
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import sys
def main():
"""
ALL LANDMINES ARE EMITTED FROM HERE.
"""
print 'Need to clobber after ICU52 roll.'
print 'Landmines test.'
print 'Activating MSVS 2013.'
print 'Revert activation of MSVS 2013.'
print 'Activating MSVS 2013 again.'
print 'Clobber after ICU roll.'
print 'Moar clobbering...'
print 'Remove build/android.gypi'
print 'Cleanup after windows ninja switch attempt.'
print 'Switching to pinned msvs toolchain.'
print 'Clobbering to hopefully resolve problem with mksnapshot'
print 'Clobber after ICU roll.'
print 'Clobber after Android NDK update.'
print 'Clober to fix windows build problems.'
print 'Clober again to fix windows build problems.'
print 'Clobber to possibly resolve failure on win-32 bot.'
return 0
if __name__ == '__main__':
sys.exit(main())
|
geminy/aidear
|
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/v8/gypfiles/get_landmines.py
|
Python
|
gpl-3.0
| 1,128
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2017 Samuele Carcagno <sam.carcagno@gmail.com>
# This file is part of pysoundanalyser
# pysoundanalyser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pysoundanalyser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pysoundanalyser. If not, see <http://www.gnu.org/licenses/>.
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
from .pyqtver import*
if pyqtversion == 4:
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import QComboBox, QDialog, QDialogButtonBox, QDoubleValidator, QGridLayout, QIntValidator, QLabel, QLineEdit, QVBoxLayout
elif pyqtversion == -4:
from PySide import QtGui, QtCore
from PySide.QtGui import QComboBox, QDialog, QDialogButtonBox, QDoubleValidator, QGridLayout, QIntValidator, QLabel, QLineEdit, QVBoxLayout
elif pyqtversion == 5:
from PyQt5 import QtGui, QtCore
from PyQt5.QtGui import QDoubleValidator, QIntValidator
from PyQt5.QtWidgets import QComboBox, QDialog, QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QVBoxLayout
class applyFIR2PresetsDialog(QDialog):
def __init__(self, parent):
QDialog.__init__(self, parent)
self.prm = parent.prm
self.currLocale = self.parent().prm['data']['currentLocale']
self.currLocale.setNumberOptions(self.currLocale.OmitGroupSeparator | self.currLocale.RejectGroupSeparator)
vbl = QVBoxLayout()
self.grid = QGridLayout()
filterTypeLabel = QLabel(self.tr('Filter Type: '))
self.filterChooser = QComboBox()
self.filterChooser.addItems([self.tr('lowpass'), self.tr('highpass'), self.tr('bandpass'), self.tr('bandstop')])
self.filterChooser.setCurrentIndex(0)
self.grid.addWidget(self.filterChooser, 0, 1)
self.filterChooser.currentIndexChanged[int].connect(self.onChangeFilterType)
self.filterOrderLabel = QLabel(self.tr('Filter Order: '))
self.filterOrderWidget = QLineEdit('256')
self.filterOrderWidget.setValidator(QIntValidator(self))
self.grid.addWidget(self.filterOrderLabel, 0, 2)
self.grid.addWidget(self.filterOrderWidget, 0, 3)
self.currFilterType = self.tr('lowpass')
self.cutoffLabel = QLabel(self.tr('Cutoff: '))
self.endCutoffLabel = QLabel(self.tr('End Transition Band = Cutoff *'))
self.cutoffWidget = QLineEdit('')
self.cutoffWidget.setValidator(QDoubleValidator(self))
endCutoff = 1.2
self.endCutoffWidget = QLineEdit(self.currLocale.toString(endCutoff))
self.endCutoffWidget.setValidator(QDoubleValidator(self))
self.grid.addWidget(self.cutoffLabel, 2, 1)
self.grid.addWidget(self.cutoffWidget, 2, 2)
self.grid.addWidget(self.endCutoffLabel, 2, 3)
self.grid.addWidget(self.endCutoffWidget, 2, 4)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok|
QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
vbl.addLayout(self.grid)
vbl.addWidget(buttonBox)
self.setLayout(vbl)
self.setWindowTitle(self.tr("Apply Filter"))
def onChangeFilterType(self):
prevFilterType = self.currFilterType
self.currFilterType = str(self.filterChooser.currentText())
if self.currFilterType != prevFilterType:
if prevFilterType == self.tr('lowpass'):
self.grid.removeWidget(self.cutoffLabel)
#self.cutoffLabel.setParent(None)
self.cutoffLabel.deleteLater()
self.grid.removeWidget(self.endCutoffLabel)
#self.endCutoffLabel.setParent(None)
self.endCutoffLabel.deleteLater()
self.grid.removeWidget(self.cutoffWidget)
#self.cutoffWidget.setParent(None)
self.cutoffWidget.deleteLater()
self.grid.removeWidget(self.endCutoffWidget)
#self.endCutoffWidget.setParent(None)
self.endCutoffWidget.deleteLater()
elif prevFilterType == self.tr('highpass'):
self.grid.removeWidget(self.cutoffLabel)
#self.cutoffLabel.setParent(None)
self.cutoffLabel.deleteLater()
self.grid.removeWidget(self.startCutoffLabel)
#self.startCutoffLabel.setParent(None)
self.startCutoffLabel.deleteLater()
self.grid.removeWidget(self.cutoffWidget)
#self.cutoffWidget.setParent(None)
self.cutoffWidget.deleteLater()
self.grid.removeWidget(self.startCutoffWidget)
#self.startCutoffWidget.setParent(None)
self.startCutoffWidget.deleteLater()
elif prevFilterType == self.tr('bandpass') or prevFilterType == self.tr('bandstop'):
self.grid.removeWidget(self.lowerCutoffLabel)
#self.lowerCutoffLabel.setParent(None)
self.lowerCutoffLabel.deleteLater()
self.grid.removeWidget(self.startCutoffLabel)
#self.startCutoffLabel.setParent(None)
self.startCutoffLabel.deleteLater()
self.grid.removeWidget(self.lowerCutoffWidget)
#self.lowerCutoffWidget.setParent(None)
self.lowerCutoffWidget.deleteLater()
self.grid.removeWidget(self.startCutoffWidget)
#self.startCutoffWidget.setParent(None)
self.startCutoffWidget.deleteLater()
self.grid.removeWidget(self.higherCutoffLabel)
#self.higherCutoffLabel.setParent(None)
self.higherCutoffLabel.deleteLater()
self.grid.removeWidget(self.endCutoffLabel)
#self.endCutoffLabel.setParent(None)
self.endCutoffLabel.deleteLater()
self.grid.removeWidget(self.higherCutoffWidget)
#self.higherCutoffWidget.setParent(None)
self.higherCutoffWidget.deleteLater()
self.grid.removeWidget(self.endCutoffWidget)
#self.endCutoffWidget.setParent(None)
self.endCutoffWidget.deleteLater()
if self.currFilterType == self.tr('lowpass'):
self.cutoffLabel = QLabel(self.tr('Cutoff: '))
self.endCutoffLabel = QLabel(self.tr('End Transition Band = Cutoff *'))
self.cutoffWidget = QLineEdit('')
self.cutoffWidget.setValidator(QDoubleValidator(self))
endCutoff = 1.2
self.endCutoffWidget = QLineEdit(self.currLocale.toString(endCutoff))
self.endCutoffWidget.setValidator(QDoubleValidator(self))
self.grid.addWidget(self.cutoffLabel, 2, 1)
self.grid.addWidget(self.cutoffWidget, 2, 2)
self.grid.addWidget(self.endCutoffLabel, 2, 3)
self.grid.addWidget(self.endCutoffWidget, 2, 4)
elif self.currFilterType == self.tr('highpass'):
self.cutoffLabel = QLabel(self.tr('Cutoff: '))
self.startCutoffLabel = QLabel(self.tr('Start Transition Band = Cutoff *'))
self.cutoffWidget = QLineEdit('')
self.cutoffWidget.setValidator(QDoubleValidator(self))
startCutoff = 0.8
self.startCutoffWidget = QLineEdit(self.currLocale.toString(startCutoff))
self.startCutoffWidget.setValidator(QDoubleValidator(self))
self.grid.addWidget(self.cutoffLabel, 2, 1)
self.grid.addWidget(self.cutoffWidget, 2, 2)
self.grid.addWidget(self.startCutoffLabel, 2, 3)
self.grid.addWidget(self.startCutoffWidget, 2, 4)
elif self.currFilterType == self.tr('bandpass'):
self.lowerCutoffLabel = QLabel(self.tr('Lower Cutoff: '))
self.startCutoffLabel = QLabel(self.tr('Start Transition Band = Cutoff *'))
self.lowerCutoffWidget = QLineEdit('')
self.lowerCutoffWidget.setValidator(QDoubleValidator(self))
startCutoff = 0.8
self.startCutoffWidget = QLineEdit(self.currLocale.toString(startCutoff))
self.startCutoffWidget.setValidator(QDoubleValidator(self))
self.grid.addWidget(self.lowerCutoffLabel, 2, 1)
self.grid.addWidget(self.lowerCutoffWidget, 2, 2)
self.grid.addWidget(self.startCutoffLabel, 2, 3)
self.grid.addWidget(self.startCutoffWidget, 2, 4)
self.higherCutoffLabel = QLabel(self.tr('Higher Cutoff: '))
self.endCutoffLabel = QLabel(self.tr('End Transition Band = Cutoff *'))
self.higherCutoffWidget = QLineEdit('')
self.higherCutoffWidget.setValidator(QDoubleValidator(self))
endCutoff = 1.2
self.endCutoffWidget = QLineEdit(self.currLocale.toString(endCutoff))
self.endCutoffWidget.setValidator(QDoubleValidator(self))
self.grid.addWidget(self.higherCutoffLabel, 3, 1)
self.grid.addWidget(self.higherCutoffWidget, 3, 2)
self.grid.addWidget(self.endCutoffLabel, 3, 3)
self.grid.addWidget(self.endCutoffWidget, 3, 4)
elif self.currFilterType == self.tr('bandstop'):
self.lowerCutoffLabel = QLabel(self.tr('Lower Cutoff: '))
self.endCutoffLabel = QLabel(self.tr('End Transition Band = Cutoff *'))
self.lowerCutoffWidget = QLineEdit('')
self.lowerCutoffWidget.setValidator(QDoubleValidator(self))
endCutoff = 1.2
self.endCutoffWidget = QLineEdit(self.currLocale.toString(endCutoff))
self.endCutoffWidget.setValidator(QDoubleValidator(self))
self.grid.addWidget(self.lowerCutoffLabel, 2, 1)
self.grid.addWidget(self.lowerCutoffWidget, 2, 2)
self.grid.addWidget(self.endCutoffLabel, 2, 3)
self.grid.addWidget(self.endCutoffWidget, 2, 4)
self.higherCutoffLabel = QLabel(self.tr('Higher Cutoff: '))
self.startCutoffLabel = QLabel(self.tr('Start Transition Band = Cutoff *'))
self.higherCutoffWidget = QLineEdit('')
self.higherCutoffWidget.setValidator(QDoubleValidator(self))
startCutoff = 0.8
self.startCutoffWidget = QLineEdit(self.currLocale.toString(startCutoff))
self.startCutoffWidget.setValidator(QDoubleValidator(self))
self.grid.addWidget(self.higherCutoffLabel, 3, 1)
self.grid.addWidget(self.higherCutoffWidget, 3, 2)
self.grid.addWidget(self.startCutoffLabel, 3, 3)
self.grid.addWidget(self.startCutoffWidget, 3, 4)
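# Hedged refactoring sketch (not part of the original dialog): the repeated
# removeWidget()/deleteLater() pairs in onChangeFilterType could be folded
# into a small helper along these lines:
#
# def _removeWidgets(grid, *widgets):
#     for w in widgets:
#         grid.removeWidget(w)
#         w.deleteLater()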
|
sam81/pysoundanalyser
|
pysoundanalyser/dialog_apply_filter.py
|
Python
|
gpl-3.0
| 11,571
|
'''
Copyright (C) 2016 Bastille Networks
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# A "code3" is a 1-3 character type-signature string: input code, output code,
# and tap code, with the output and tap codes falling back to the input code.
def i_code (code3):
    return code3[0]
def o_code (code3):
if len (code3) >= 2:
return code3[1]
else:
return code3[0]
def tap_code (code3):
if len (code3) >= 3:
return code3[2]
else:
return code3[0]
def i_type (code3):
return char_to_type[i_code (code3)]
def o_type (code3):
return char_to_type[o_code (code3)]
def tap_type (code3):
return char_to_type[tap_code (code3)]
char_to_type = {}
char_to_type['s'] = 'short'
char_to_type['i'] = 'int'
char_to_type['f'] = 'float'
char_to_type['c'] = 'gr_complex'
char_to_type['b'] = 'unsigned char'
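# A minimal usage sketch (mine, not part of the original build utils): block
# generators use 1-3 character signatures such as 'fcc' (float in, gr_complex
# out, gr_complex taps), which the helpers above expand into C++ type names.
if __name__ == '__main__':
    assert i_type('fcc') == 'float'
    assert o_type('fcc') == 'gr_complex'
    assert tap_type('fcc') == 'gr_complex'
    # a single-character code falls back to the input type for output and taps
    assert o_type('s') == 'short' and tap_type('s') == 'short'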
|
BastilleResearch/gr-nordic
|
python/build_utils_codes.py
|
Python
|
gpl-3.0
| 1,289
|
import pygame
# 6x8-pixel monospaced bitmap font sheet; Addr() hands the sprite dict to callers.
sprite = {}
sprite['image'] = pygame.image.load("test/font6x8_normal_w.png")
sprite['width'] = 6    # glyph width in pixels
sprite['height'] = 8   # glyph height in pixels
def Addr():
    return sprite
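# A minimal usage sketch (my assumption: an ASCII-ordered sheet starting at
# character 32 with 32 glyphs per row -- the actual PNG layout is not shown):
# compute the source rectangle for one glyph in the font sheet.
def glyph_rect(char, per_row=32, first=32):
    font = Addr()
    index = ord(char) - first
    x = (index % per_row) * font['width']
    y = (index // per_row) * font['height']
    return pygame.Rect(x, y, font['width'], font['height'])
# e.g. screen.blit(Addr()['image'], dest, area=glyph_rect('A'))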
|
lamestation/LEAM
|
media/gfx_font6x8.py
|
Python
|
gpl-3.0
| 165
|
from __future__ import division, print_function
import json
from collections import OrderedDict, namedtuple
import numpy as np
from six import string_types
def isnamedtuple(obj):
"""Heuristic check if an object is a namedtuple."""
return isinstance(obj, tuple) \
and hasattr(obj, "_fields") \
and hasattr(obj, "_asdict") \
and callable(obj._asdict)
def serialize(data):
if data is None or isinstance(data, (bool, int, float, str, string_types)):
return data
if isinstance(data, list):
return [serialize(val) for val in data]
if isinstance(data, OrderedDict):
return {"py/collections.OrderedDict":
[[serialize(k), serialize(v)] for k, v in data.items()]}
if isnamedtuple(data):
return {"py/collections.namedtuple": {
"type": type(data).__name__,
"fields": list(data._fields),
"values": [serialize(getattr(data, f)) for f in data._fields]}}
if isinstance(data, dict):
if all(isinstance(k, str) for k in data):
return {k: serialize(v) for k, v in data.items()}
return {"py/dict": [[serialize(k), serialize(v)] for k, v in data.items()]}
if isinstance(data, tuple):
return {"py/tuple": [serialize(val) for val in data]}
if isinstance(data, set):
return {"py/set": [serialize(val) for val in data]}
if isinstance(data, np.ndarray):
return {"py/numpy.ndarray": {
"values": data.tolist(),
"dtype": str(data.dtype)}}
raise TypeError("Type %s not data-serializable" % type(data))
def restore(dct):
if "py/dict" in dct:
return dict(dct["py/dict"])
if "py/tuple" in dct:
return tuple(dct["py/tuple"])
if "py/set" in dct:
return set(dct["py/set"])
if "py/collections.namedtuple" in dct:
data = dct["py/collections.namedtuple"]
return namedtuple(data["type"], data["fields"])(*data["values"])
if "py/numpy.ndarray" in dct:
data = dct["py/numpy.ndarray"]
return np.array(data["values"], dtype=data["dtype"])
if "py/collections.OrderedDict" in dct:
return OrderedDict(dct["py/collections.OrderedDict"])
return dct
def data_to_json(data):
return json.dumps(serialize(data))
def json_to_data(s):
return json.loads(s, object_hook=restore)
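# A short round-trip sketch (the sample data below is mine, not from chemlab's
# tests): namedtuples, numpy arrays, and non-string dict keys survive the
# data_to_json / json_to_data round trip.
if __name__ == '__main__':
    Point = namedtuple('Point', ['x', 'y'])
    payload = OrderedDict([('p', Point(1, 2)),
                           ('arr', np.array([1.0, 2.0], dtype='float64')),
                           ('tags', {('a', 1): 'non-string key'})])
    restored = json_to_data(data_to_json(payload))
    assert restored['p'] == Point(1, 2)
    assert restored['arr'].dtype == np.dtype('float64')
    assert restored['tags'][('a', 1)] == 'non-string key'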
|
chemlab/chemlab
|
chemlab/core/serialization.py
|
Python
|
gpl-3.0
| 2,379
|
"""Provides compatibility with first-generation host delegation options in ansible-test."""
from __future__ import annotations
import argparse
import dataclasses
import enum
import os
import types
import typing as t
from ..constants import (
CONTROLLER_PYTHON_VERSIONS,
SUPPORTED_PYTHON_VERSIONS,
)
from ..util import (
ApplicationError,
display,
filter_args,
sorted_versions,
str_to_version,
)
from ..docker_util import (
docker_available,
)
from ..completion import (
DOCKER_COMPLETION,
REMOTE_COMPLETION,
filter_completion,
)
from ..host_configs import (
ControllerConfig,
ControllerHostConfig,
DockerConfig,
FallbackDetail,
FallbackReason,
HostConfig,
HostContext,
HostSettings,
NativePythonConfig,
NetworkInventoryConfig,
NetworkRemoteConfig,
OriginConfig,
PosixRemoteConfig,
VirtualPythonConfig,
WindowsInventoryConfig,
WindowsRemoteConfig,
)
def filter_python(version, versions): # type: (t.Optional[str], t.Optional[t.List[str]]) -> t.Optional[str]
"""If a Python version is given and is in the given version list, return that Python version, otherwise return None."""
return version if version in versions else None
def controller_python(version): # type: (t.Optional[str]) -> t.Optional[str]
"""If a Python version is given and is supported by the controller, return that Python version, otherwise return None."""
return filter_python(version, CONTROLLER_PYTHON_VERSIONS)
def get_fallback_remote_controller(): # type: () -> str
"""Return the remote fallback platform for the controller."""
platform = 'freebsd' # lower cost than RHEL and macOS
candidates = [item for item in filter_completion(REMOTE_COMPLETION).values() if item.controller_supported and item.platform == platform]
fallback = sorted(candidates, key=lambda value: str_to_version(value.version), reverse=True)[0]
return fallback.name
def get_option_name(name): # type: (str) -> str
"""Return a command-line option name from the given option name."""
if name == 'targets':
name = 'target'
return f'--{name.replace("_", "-")}'
class PythonVersionUnsupportedError(ApplicationError):
"""A Python version was requested for a context which does not support that version."""
def __init__(self, context, version, versions):
super().__init__(f'Python {version} is not supported by environment `{context}`. Supported Python version(s) are: {", ".join(versions)}')
class PythonVersionUnspecifiedError(ApplicationError):
"""A Python version was not specified for a context which is unknown, thus the Python version is unknown."""
def __init__(self, context):
super().__init__(f'A Python version was not specified for environment `{context}`. Use the `--python` option to specify a Python version.')
class ControllerNotSupportedError(ApplicationError):
"""Option(s) were specified which do not provide support for the controller and would be ignored because they are irrelevant for the target."""
def __init__(self, context):
super().__init__(f'Environment `{context}` does not provide a Python version supported by the controller.')
class OptionsConflictError(ApplicationError):
"""Option(s) were specified which conflict with other options."""
def __init__(self, first, second):
super().__init__(f'Options `{" ".join(first)}` cannot be combined with options `{" ".join(second)}`.')
@dataclasses.dataclass(frozen=True)
class LegacyHostOptions:
"""Legacy host options used prior to the availability of separate controller and target host configuration."""
python: t.Optional[str] = None
python_interpreter: t.Optional[str] = None
local: t.Optional[bool] = None
venv: t.Optional[bool] = None
venv_system_site_packages: t.Optional[bool] = None
remote: t.Optional[str] = None
remote_provider: t.Optional[str] = None
docker: t.Optional[str] = None
docker_privileged: t.Optional[bool] = None
docker_seccomp: t.Optional[str] = None
docker_memory: t.Optional[int] = None
windows: t.Optional[t.List[str]] = None
platform: t.Optional[t.List[str]] = None
platform_collection: t.Optional[t.List[t.Tuple[str, str]]] = None
platform_connection: t.Optional[t.List[t.Tuple[str, str]]] = None
inventory: t.Optional[str] = None
@staticmethod
def create(namespace): # type: (t.Union[argparse.Namespace, types.SimpleNamespace]) -> LegacyHostOptions
"""Create legacy host options from the given namespace."""
kwargs = {field.name: getattr(namespace, field.name, None) for field in dataclasses.fields(LegacyHostOptions)}
if kwargs['python'] == 'default':
kwargs['python'] = None
return LegacyHostOptions(**kwargs)
@staticmethod
def purge_namespace(namespace): # type: (t.Union[argparse.Namespace, types.SimpleNamespace]) -> None
"""Purge legacy host options fields from the given namespace."""
for field in dataclasses.fields(LegacyHostOptions): # type: dataclasses.Field
if hasattr(namespace, field.name):
delattr(namespace, field.name)
@staticmethod
def purge_args(args): # type: (t.List[str]) -> t.List[str]
"""Purge legacy host options from the given command line arguments."""
fields = dataclasses.fields(LegacyHostOptions) # type: t.Tuple[dataclasses.Field, ...]
filters = {get_option_name(field.name): 0 if field.type is t.Optional[bool] else 1 for field in fields} # type: t.Dict[str, int]
return filter_args(args, filters)
def get_options_used(self): # type: () -> t.Tuple[str, ...]
"""Return a tuple of the command line options used."""
fields = dataclasses.fields(self) # type: t.Tuple[dataclasses.Field, ...]
options = tuple(sorted(get_option_name(field.name) for field in fields if getattr(self, field.name)))
return options
class TargetMode(enum.Enum):
"""Type of provisioning to use for the targets."""
WINDOWS_INTEGRATION = enum.auto() # windows-integration
NETWORK_INTEGRATION = enum.auto() # network-integration
POSIX_INTEGRATION = enum.auto() # integration
SANITY = enum.auto() # sanity
UNITS = enum.auto() # units
SHELL = enum.auto() # shell
NO_TARGETS = enum.auto() # coverage
@property
def one_host(self):
"""Return True if only one host (the controller) should be used, otherwise return False."""
return self in (TargetMode.SANITY, TargetMode.UNITS, TargetMode.NO_TARGETS)
@property
def no_fallback(self):
"""Return True if no fallback is acceptable for the controller (due to options not applying to the target), otherwise return False."""
return self in (TargetMode.WINDOWS_INTEGRATION, TargetMode.NETWORK_INTEGRATION, TargetMode.NO_TARGETS)
@property
def multiple_pythons(self):
"""Return True if multiple Python versions are allowed, otherwise False."""
return self in (TargetMode.SANITY, TargetMode.UNITS)
@property
def has_python(self):
"""Return True if this mode uses Python, otherwise False."""
return self in (TargetMode.POSIX_INTEGRATION, TargetMode.SANITY, TargetMode.UNITS, TargetMode.SHELL)
def convert_legacy_args(
argv, # type: t.List[str]
args, # type: t.Union[argparse.Namespace, types.SimpleNamespace]
mode, # type: TargetMode
): # type: (...) -> HostSettings
"""Convert pre-split host arguments in the given namespace to their split counterparts."""
old_options = LegacyHostOptions.create(args)
old_options.purge_namespace(args)
new_options = [
'--controller',
'--target',
'--target-python',
]
used_old_options = old_options.get_options_used()
used_new_options = [name for name in new_options if name in argv]
if used_old_options:
if used_new_options:
raise OptionsConflictError(used_old_options, used_new_options)
controller, targets, controller_fallback = get_legacy_host_config(mode, old_options)
if controller_fallback:
if mode.one_host:
display.info(controller_fallback.message, verbosity=1)
else:
display.warning(controller_fallback.message)
used_default_pythons = mode in (TargetMode.SANITY, TargetMode.UNITS) and not native_python(old_options)
else:
controller = args.controller or OriginConfig()
controller_fallback = None
if mode == TargetMode.NO_TARGETS:
targets = []
used_default_pythons = False
elif args.targets:
targets = args.targets
used_default_pythons = False
else:
targets = default_targets(mode, controller)
used_default_pythons = mode in (TargetMode.SANITY, TargetMode.UNITS)
args.controller = controller
args.targets = targets
if used_default_pythons:
targets = t.cast(t.List[ControllerConfig], targets)
skipped_python_versions = sorted_versions(list(set(SUPPORTED_PYTHON_VERSIONS) - {target.python.version for target in targets}))
else:
skipped_python_versions = []
filtered_args = old_options.purge_args(argv)
filtered_args = filter_args(filtered_args, {name: 1 for name in new_options})
host_settings = HostSettings(
controller=controller,
targets=targets,
skipped_python_versions=skipped_python_versions,
filtered_args=filtered_args,
controller_fallback=controller_fallback,
)
return host_settings
def controller_targets(
mode, # type: TargetMode
options, # type: LegacyHostOptions
controller, # type: ControllerHostConfig
): # type: (...) -> t.List[ControllerConfig]
"""Return the configuration for controller targets."""
python = native_python(options)
if python:
targets = [ControllerConfig(python=python)]
else:
targets = default_targets(mode, controller)
return targets
def native_python(options): # type: (LegacyHostOptions) -> t.Optional[NativePythonConfig]
"""Return a NativePythonConfig for the given version if it is not None, otherwise return None."""
if not options.python and not options.python_interpreter:
return None
return NativePythonConfig(version=options.python, path=options.python_interpreter)
def get_legacy_host_config(
mode, # type: TargetMode
options, # type: LegacyHostOptions
): # type: (...) -> t.Tuple[HostConfig, t.List[HostConfig], t.Optional[FallbackDetail]]
"""
Returns controller and target host configs derived from the provided legacy host options.
The goal is to match the original behavior, by using non-split testing whenever possible.
When the options support the controller, use the options for the controller and use ControllerConfig for the targets.
When the options do not support the controller, use the options for the targets and use a default controller config influenced by the options.
"""
venv_fallback = 'venv/default'
docker_fallback = 'default'
remote_fallback = get_fallback_remote_controller()
controller_fallback = None # type: t.Optional[t.Tuple[str, str, FallbackReason]]
if options.venv:
if controller_python(options.python) or not options.python:
controller = OriginConfig(python=VirtualPythonConfig(version=options.python or 'default', system_site_packages=options.venv_system_site_packages))
else:
controller_fallback = f'origin:python={venv_fallback}', f'--venv --python {options.python}', FallbackReason.PYTHON
controller = OriginConfig(python=VirtualPythonConfig(version='default', system_site_packages=options.venv_system_site_packages))
if mode in (TargetMode.SANITY, TargetMode.UNITS):
targets = controller_targets(mode, options, controller)
# Target sanity tests either have no Python requirements or manage their own virtual environments.
# Thus there is no point in setting up virtual environments ahead of time for them.
if mode == TargetMode.UNITS:
targets = [ControllerConfig(python=VirtualPythonConfig(version=target.python.version, path=target.python.path,
system_site_packages=options.venv_system_site_packages)) for target in targets]
else:
targets = [ControllerConfig(python=VirtualPythonConfig(version=options.python or 'default',
system_site_packages=options.venv_system_site_packages))]
elif options.docker:
docker_config = filter_completion(DOCKER_COMPLETION).get(options.docker)
if docker_config:
if options.python and options.python not in docker_config.supported_pythons:
raise PythonVersionUnsupportedError(f'--docker {options.docker}', options.python, docker_config.supported_pythons)
if docker_config.controller_supported:
if controller_python(options.python) or not options.python:
controller = DockerConfig(name=options.docker, python=native_python(options),
privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'docker:{options.docker}', f'--docker {options.docker} --python {options.python}', FallbackReason.PYTHON
controller = DockerConfig(name=options.docker)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'docker:{docker_fallback}', f'--docker {options.docker}', FallbackReason.ENVIRONMENT
controller = DockerConfig(name=docker_fallback)
targets = [DockerConfig(name=options.docker, python=native_python(options),
privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)]
else:
if not options.python:
raise PythonVersionUnspecifiedError(f'--docker {options.docker}')
if controller_python(options.python):
controller = DockerConfig(name=options.docker, python=native_python(options),
privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'docker:{docker_fallback}', f'--docker {options.docker} --python {options.python}', FallbackReason.PYTHON
controller = DockerConfig(name=docker_fallback)
targets = [DockerConfig(name=options.docker, python=native_python(options),
privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)]
elif options.remote:
remote_config = filter_completion(REMOTE_COMPLETION).get(options.remote)
context, reason = None, None
if remote_config:
if options.python and options.python not in remote_config.supported_pythons:
raise PythonVersionUnsupportedError(f'--remote {options.remote}', options.python, remote_config.supported_pythons)
if remote_config.controller_supported:
if controller_python(options.python) or not options.python:
controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider)
targets = controller_targets(mode, options, controller)
else:
controller_fallback = f'remote:{options.remote}', f'--remote {options.remote} --python {options.python}', FallbackReason.PYTHON
controller = PosixRemoteConfig(name=options.remote, provider=options.remote_provider)
targets = controller_targets(mode, options, controller)
else:
context, reason = f'--remote {options.remote}', FallbackReason.ENVIRONMENT
controller = None
targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider)]
elif mode == TargetMode.SHELL and options.remote.startswith('windows/'):
if options.python and options.python not in CONTROLLER_PYTHON_VERSIONS:
raise ControllerNotSupportedError(f'--python {options.python}')
controller = OriginConfig(python=native_python(options))
targets = [WindowsRemoteConfig(name=options.remote, provider=options.remote_provider)]
else:
if not options.python:
raise PythonVersionUnspecifiedError(f'--remote {options.remote}')
if controller_python(options.python):
controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider)
targets = controller_targets(mode, options, controller)
else:
context, reason = f'--remote {options.remote} --python {options.python}', FallbackReason.PYTHON
controller = None
targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider)]
if not controller:
if docker_available():
controller_fallback = f'docker:{docker_fallback}', context, reason
controller = DockerConfig(name=docker_fallback)
else:
controller_fallback = f'remote:{remote_fallback}', context, reason
controller = PosixRemoteConfig(name=remote_fallback)
else: # local/unspecified
# There are several changes in behavior from the legacy implementation when using no delegation (or the `--local` option).
# These changes are due to ansible-test now maintaining consistency between its own Python and that of controller Python subprocesses.
#
# 1) The `--python-interpreter` option (if different from sys.executable) now affects controller subprocesses and triggers re-execution of ansible-test.
# Previously this option was completely ignored except when used with the `--docker` or `--remote` options.
# 2) The `--python` option now triggers re-execution of ansible-test if it differs from sys.version_info.
# Previously it affected Python subprocesses, but not ansible-test itself.
if controller_python(options.python) or not options.python:
controller = OriginConfig(python=native_python(options))
targets = controller_targets(mode, options, controller)
else:
controller_fallback = 'origin:python=default', f'--python {options.python}', FallbackReason.PYTHON
controller = OriginConfig()
targets = controller_targets(mode, options, controller)
if controller_fallback:
controller_option, context, reason = controller_fallback
if mode.no_fallback:
raise ControllerNotSupportedError(context)
fallback_detail = FallbackDetail(
reason=reason,
message=f'Using `--controller {controller_option}` since `{context}` does not support the controller.',
)
else:
fallback_detail = None
if mode.one_host and any(not isinstance(target, ControllerConfig) for target in targets):
raise ControllerNotSupportedError(controller_fallback[1])
if mode == TargetMode.NO_TARGETS:
targets = []
else:
targets = handle_non_posix_targets(mode, options, targets)
return controller, targets, fallback_detail
def handle_non_posix_targets(
mode, # type: TargetMode
options, # type: LegacyHostOptions
targets, # type: t.List[HostConfig]
): # type: (...) -> t.List[HostConfig]
"""Return a list of non-POSIX targets if the target mode is non-POSIX."""
if mode == TargetMode.WINDOWS_INTEGRATION:
if options.windows:
targets = [WindowsRemoteConfig(name=f'windows/{version}', provider=options.remote_provider) for version in options.windows]
else:
targets = [WindowsInventoryConfig(path=options.inventory)]
elif mode == TargetMode.NETWORK_INTEGRATION:
if options.platform:
targets = [NetworkRemoteConfig(name=platform, provider=options.remote_provider) for platform in options.platform]
for platform, collection in options.platform_collection or []:
for entry in targets:
if entry.platform == platform:
entry.collection = collection
for platform, connection in options.platform_connection or []:
for entry in targets:
if entry.platform == platform:
entry.connection = connection
else:
targets = [NetworkInventoryConfig(path=options.inventory)]
return targets
def default_targets(
mode, # type: TargetMode
controller, # type: ControllerHostConfig
): # type: (...) -> t.List[HostConfig]
"""Return a list of default targets for the given target mode."""
if mode == TargetMode.WINDOWS_INTEGRATION:
targets = [WindowsInventoryConfig(path=os.path.abspath('test/integration/inventory.winrm'))]
elif mode == TargetMode.NETWORK_INTEGRATION:
targets = [NetworkInventoryConfig(path=os.path.abspath('test/integration/inventory.networking'))]
elif mode.multiple_pythons:
targets = controller.get_default_targets(HostContext(controller_config=controller))
else:
targets = [ControllerConfig()]
return targets
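# A distilled, self-contained sketch (not part of ansible-test) of the
# namespace-to-dataclass pattern used by LegacyHostOptions.create above;
# the option names here are illustrative.
@dataclasses.dataclass(frozen=True)
class _DemoOptions:
    python: t.Optional[str] = None
    docker: t.Optional[str] = None
    @staticmethod
    def create(namespace):  # type: (argparse.Namespace) -> _DemoOptions
        # pull only the declared fields; attributes missing from the namespace become None
        kwargs = {field.name: getattr(namespace, field.name, None) for field in dataclasses.fields(_DemoOptions)}
        return _DemoOptions(**kwargs)
# e.g. _DemoOptions.create(argparse.Namespace(python='3.10', unrelated=True)) == _DemoOptions(python='3.10')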
|
abadger/ansible
|
test/lib/ansible_test/_internal/cli/compat.py
|
Python
|
gpl-3.0
| 22,147
|
# This file is part of PlexPy.
#
# PlexPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PlexPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PlexPy. If not, see <http://www.gnu.org/licenses/>.
from plexpy import logger, datatables, common, database, helpers
import datetime
class DataFactory(object):
"""
Retrieve and process data from the monitor database
"""
def __init__(self):
pass
def get_datatables_history(self, kwargs=None, custom_where=None, grouping=0, watched_percent=85):
data_tables = datatables.DataTables()
group_by = ['session_history.reference_id'] if grouping else ['session_history.id']
columns = ['session_history.reference_id',
'session_history.id',
'started AS date',
'MIN(started) AS started',
'MAX(stopped) AS stopped',
'SUM(CASE WHEN stopped > 0 THEN (stopped - started) ELSE 0 END) - \
SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS duration',
'SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS paused_counter',
'session_history.user_id',
'session_history.user',
'(CASE WHEN users.friendly_name IS NULL THEN users.username ELSE users.friendly_name END) \
AS friendly_name',
'platform',
'player',
'ip_address',
'session_history.media_type',
'session_history_metadata.rating_key',
'session_history_metadata.parent_rating_key',
'session_history_metadata.grandparent_rating_key',
'session_history_metadata.full_title',
'session_history_metadata.parent_title',
'session_history_metadata.year',
'session_history_metadata.media_index',
'session_history_metadata.parent_media_index',
'session_history_metadata.thumb',
'session_history_metadata.parent_thumb',
'session_history_metadata.grandparent_thumb',
'((CASE WHEN view_offset IS NULL THEN 0.1 ELSE view_offset * 1.0 END) / \
(CASE WHEN session_history_metadata.duration IS NULL THEN 1.0 \
ELSE session_history_metadata.duration * 1.0 END) * 100) AS percent_complete',
'session_history_media_info.video_decision',
'session_history_media_info.audio_decision',
'COUNT(*) AS group_count',
'GROUP_CONCAT(session_history.id) AS group_ids'
]
try:
query = data_tables.ssp_query(table_name='session_history',
columns=columns,
custom_where=custom_where,
group_by=group_by,
join_types=['LEFT OUTER JOIN',
'JOIN',
'JOIN'],
join_tables=['users',
'session_history_metadata',
'session_history_media_info'],
join_evals=[['session_history.user_id', 'users.user_id'],
['session_history.id', 'session_history_metadata.id'],
['session_history.id', 'session_history_media_info.id']],
kwargs=kwargs)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_history: %s." % e)
return {'recordsFiltered': 0,
'recordsTotal': 0,
'draw': 0,
'data': 'null',
'error': 'Unable to execute database query.'}
history = query['result']
filter_duration = 0
total_duration = self.get_total_duration(custom_where=custom_where)
rows = []
for item in history:
filter_duration += int(item['duration'])
if item['media_type'] == 'episode' and item['parent_thumb']:
thumb = item['parent_thumb']
elif item['media_type'] == 'episode':
thumb = item['grandparent_thumb']
else:
thumb = item['thumb']
if item['percent_complete'] >= watched_percent:
watched_status = 1
elif item['percent_complete'] >= watched_percent/2:
watched_status = 0.5
else:
watched_status = 0
# Rename Mystery platform names
platform = common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform'])
row = {'reference_id': item['reference_id'],
'id': item['id'],
'date': item['date'],
'started': item['started'],
'stopped': item['stopped'],
'duration': item['duration'],
'paused_counter': item['paused_counter'],
'user_id': item['user_id'],
'user': item['user'],
'friendly_name': item['friendly_name'],
'platform': platform,
'player': item['player'],
'ip_address': item['ip_address'],
'media_type': item['media_type'],
'rating_key': item['rating_key'],
'parent_rating_key': item['parent_rating_key'],
'grandparent_rating_key': item['grandparent_rating_key'],
'full_title': item['full_title'],
'parent_title': item['parent_title'],
'year': item['year'],
'media_index': item['media_index'],
'parent_media_index': item['parent_media_index'],
'thumb': thumb,
'video_decision': item['video_decision'],
'audio_decision': item['audio_decision'],
'percent_complete': int(round(item['percent_complete'])),
'watched_status': watched_status,
'group_count': item['group_count'],
'group_ids': item['group_ids']
}
rows.append(row)
        output = {'recordsFiltered': query['filteredCount'],
                  'recordsTotal': query['totalCount'],
                  'data': rows,
                  'draw': query['draw'],
                  'filter_duration': helpers.human_duration(filter_duration, sig='dhm'),
                  'total_duration': helpers.human_duration(total_duration, sig='dhm')
                  }
        return output
def get_home_stats(self, grouping=0, time_range='30', stats_type=0, stats_count='5', stats_cards=[], notify_watched_percent='85'):
monitor_db = database.MonitorDatabase()
group_by = 'session_history.reference_id' if grouping else 'session_history.id'
sort_type = 'total_plays' if stats_type == 0 else 'total_duration'
home_stats = []
for stat in stats_cards:
if stat == 'top_tv':
top_tv = []
try:
query = 'SELECT t.id, t.grandparent_title, t.grandparent_rating_key, t.grandparent_thumb, ' \
'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \
'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
' AS d ' \
' FROM session_history ' \
' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
' WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
' >= datetime("now", "-%s days", "localtime") ' \
' AND session_history.media_type = "episode" ' \
' GROUP BY %s) AS t ' \
'GROUP BY t.grandparent_title ' \
'ORDER BY %s DESC ' \
'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_home_stats: top_tv: %s." % e)
return None
for item in result:
row = {'title': item['grandparent_title'],
'total_plays': item['total_plays'],
'total_duration': item['total_duration'],
'users_watched': '',
'rating_key': item['grandparent_rating_key'],
'last_play': item['last_watch'],
'grandparent_thumb': item['grandparent_thumb'],
'thumb': '',
'user': '',
'friendly_name': '',
'platform_type': '',
'platform': '',
'row_id': item['id']
}
top_tv.append(row)
home_stats.append({'stat_id': stat,
'stat_type': sort_type,
'rows': top_tv})
elif stat == 'popular_tv':
popular_tv = []
try:
query = 'SELECT t.id, t.grandparent_title, t.grandparent_rating_key, t.grandparent_thumb, ' \
'COUNT(DISTINCT t.user_id) AS users_watched, ' \
'MAX(t.started) AS last_watch, COUNT(t.id) as total_plays, SUM(t.d) AS total_duration ' \
'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
' AS d ' \
' FROM session_history ' \
' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
' WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
' >= datetime("now", "-%s days", "localtime") ' \
' AND session_history.media_type = "episode" ' \
' GROUP BY %s) AS t ' \
'GROUP BY t.grandparent_title ' \
'ORDER BY users_watched DESC, %s DESC ' \
'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_home_stats: popular_tv: %s." % e)
return None
for item in result:
row = {'title': item['grandparent_title'],
'users_watched': item['users_watched'],
'rating_key': item['grandparent_rating_key'],
'last_play': item['last_watch'],
'total_plays': item['total_plays'],
'grandparent_thumb': item['grandparent_thumb'],
'thumb': '',
'user': '',
'friendly_name': '',
'platform_type': '',
'platform': '',
'row_id': item['id']
}
popular_tv.append(row)
home_stats.append({'stat_id': stat,
'rows': popular_tv})
elif stat == 'top_movies':
top_movies = []
try:
query = 'SELECT t.id, t.full_title, t.rating_key, t.thumb, ' \
'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \
'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
' AS d ' \
' FROM session_history ' \
' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
' WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
' >= datetime("now", "-%s days", "localtime") ' \
' AND session_history.media_type = "movie" ' \
' GROUP BY %s) AS t ' \
'GROUP BY t.full_title ' \
'ORDER BY %s DESC ' \
'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_home_stats: top_movies: %s." % e)
return None
for item in result:
row = {'title': item['full_title'],
'total_plays': item['total_plays'],
'total_duration': item['total_duration'],
'users_watched': '',
'rating_key': item['rating_key'],
'last_play': item['last_watch'],
'grandparent_thumb': '',
'thumb': item['thumb'],
'user': '',
'friendly_name': '',
'platform_type': '',
'platform': '',
'row_id': item['id']
}
top_movies.append(row)
home_stats.append({'stat_id': stat,
'stat_type': sort_type,
'rows': top_movies})
elif stat == 'popular_movies':
popular_movies = []
try:
query = 'SELECT t.id, t.full_title, t.rating_key, t.thumb, ' \
'COUNT(DISTINCT t.user_id) AS users_watched, ' \
'MAX(t.started) AS last_watch, COUNT(t.id) as total_plays, SUM(t.d) AS total_duration ' \
'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
' AS d ' \
' FROM session_history ' \
' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
' WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
' >= datetime("now", "-%s days", "localtime") ' \
' AND session_history.media_type = "movie" ' \
' GROUP BY %s) AS t ' \
'GROUP BY t.full_title ' \
'ORDER BY users_watched DESC, %s DESC ' \
'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_home_stats: popular_movies: %s." % e)
return None
for item in result:
row = {'title': item['full_title'],
'users_watched': item['users_watched'],
'rating_key': item['rating_key'],
'last_play': item['last_watch'],
'total_plays': item['total_plays'],
'grandparent_thumb': '',
'thumb': item['thumb'],
'user': '',
'friendly_name': '',
'platform_type': '',
'platform': '',
'row_id': item['id']
}
popular_movies.append(row)
home_stats.append({'stat_id': stat,
'rows': popular_movies})
elif stat == 'top_music':
top_music = []
try:
query = 'SELECT t.id, t.grandparent_title, t.grandparent_rating_key, t.grandparent_thumb, ' \
'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \
'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
' AS d ' \
' FROM session_history ' \
' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
' WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
' >= datetime("now", "-%s days", "localtime") ' \
' AND session_history.media_type = "track" ' \
' GROUP BY %s) AS t ' \
'GROUP BY t.grandparent_title ' \
'ORDER BY %s DESC ' \
'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_home_stats: top_music: %s." % e)
return None
for item in result:
row = {'title': item['grandparent_title'],
'total_plays': item['total_plays'],
'total_duration': item['total_duration'],
'users_watched': '',
'rating_key': item['grandparent_rating_key'],
'last_play': item['last_watch'],
'grandparent_thumb': item['grandparent_thumb'],
'thumb': '',
'user': '',
'friendly_name': '',
'platform_type': '',
'platform': '',
'row_id': item['id']
}
top_music.append(row)
home_stats.append({'stat_id': stat,
'stat_type': sort_type,
'rows': top_music})
elif stat == 'popular_music':
popular_music = []
try:
query = 'SELECT t.id, t.grandparent_title, t.grandparent_rating_key, t.grandparent_thumb, ' \
'COUNT(DISTINCT t.user_id) AS users_watched, ' \
'MAX(t.started) AS last_watch, COUNT(t.id) as total_plays, SUM(t.d) AS total_duration ' \
'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
' AS d ' \
' FROM session_history ' \
' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
' WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
' >= datetime("now", "-%s days", "localtime") ' \
' AND session_history.media_type = "track" ' \
' GROUP BY %s) AS t ' \
'GROUP BY t.grandparent_title ' \
'ORDER BY users_watched DESC, %s DESC ' \
'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_home_stats: popular_music: %s." % e)
return None
for item in result:
row = {'title': item['grandparent_title'],
'users_watched': item['users_watched'],
'rating_key': item['grandparent_rating_key'],
'last_play': item['last_watch'],
'total_plays': item['total_plays'],
'grandparent_thumb': item['grandparent_thumb'],
'thumb': '',
'user': '',
'friendly_name': '',
'platform_type': '',
'platform': '',
'row_id': item['id']
}
popular_music.append(row)
home_stats.append({'stat_id': stat,
'rows': popular_music})
elif stat == 'top_users':
top_users = []
try:
query = 'SELECT t.user, t.user_id, t.user_thumb, t.custom_thumb, ' \
'(CASE WHEN t.friendly_name IS NULL THEN t.username ELSE t.friendly_name END) ' \
' AS friendly_name, ' \
'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \
'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
' AS d, users.thumb AS user_thumb, users.custom_avatar_url AS custom_thumb ' \
' FROM session_history ' \
' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
' LEFT OUTER JOIN users ON session_history.user_id = users.user_id ' \
' WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
' >= datetime("now", "-%s days", "localtime") ' \
' GROUP BY %s) AS t ' \
'GROUP BY t.user_id ' \
'ORDER BY %s DESC ' \
'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_home_stats: top_users: %s." % e)
return None
for item in result:
if item['custom_thumb'] and item['custom_thumb'] != item['user_thumb']:
user_thumb = item['custom_thumb']
elif item['user_thumb']:
user_thumb = item['user_thumb']
else:
user_thumb = common.DEFAULT_USER_THUMB
row = {'user': item['user'],
'user_id': item['user_id'],
'friendly_name': item['friendly_name'],
'total_plays': item['total_plays'],
'total_duration': item['total_duration'],
'last_play': item['last_watch'],
'user_thumb': user_thumb,
'grandparent_thumb': '',
'users_watched': '',
'rating_key': '',
'title': '',
'platform_type': '',
'platform': '',
'row_id': ''
}
top_users.append(row)
home_stats.append({'stat_id': stat,
'stat_type': sort_type,
'rows': top_users})
elif stat == 'top_platforms':
top_platform = []
try:
query = 'SELECT t.platform, ' \
'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \
'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
' AS d ' \
' FROM session_history ' \
' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
' WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
' >= datetime("now", "-%s days", "localtime") ' \
' GROUP BY %s) AS t ' \
'GROUP BY t.platform ' \
'ORDER BY %s DESC ' \
'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_home_stats: top_platforms: %s." % e)
return None
for item in result:
# Rename Mystery platform names
platform_type = common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform'])
row = {'platform': item['platform'],
'total_plays': item['total_plays'],
'total_duration': item['total_duration'],
'last_play': item['last_watch'],
'platform_type': platform_type,
'title': '',
'thumb': '',
'grandparent_thumb': '',
'users_watched': '',
'rating_key': '',
'user': '',
'friendly_name': '',
'row_id': ''
}
top_platform.append(row)
home_stats.append({'stat_id': stat,
'stat_type': sort_type,
'rows': top_platform})
elif stat == 'last_watched':
last_watched = []
try:
query = 'SELECT t.id, t.full_title, t.rating_key, t.thumb, t.grandparent_thumb, ' \
't.user, t.user_id, t.custom_avatar_url as user_thumb, t.player, ' \
'(CASE WHEN t.friendly_name IS NULL THEN t.username ELSE t.friendly_name END) ' \
' AS friendly_name, ' \
'MAX(t.started) AS last_watch, ' \
'((CASE WHEN t.view_offset IS NULL THEN 0.1 ELSE t.view_offset * 1.0 END) / ' \
' (CASE WHEN t.duration IS NULL THEN 1.0 ELSE t.duration * 1.0 END) * 100) ' \
' AS percent_complete ' \
'FROM (SELECT * FROM session_history ' \
' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
' LEFT OUTER JOIN users ON session_history.user_id = users.user_id ' \
' WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
' >= datetime("now", "-%s days", "localtime") ' \
' AND (session_history.media_type = "movie" ' \
' OR session_history_metadata.media_type = "episode") ' \
' GROUP BY %s) AS t ' \
'WHERE percent_complete >= %s ' \
'GROUP BY t.id ' \
'ORDER BY last_watch DESC ' \
'LIMIT %s' % (time_range, group_by, notify_watched_percent, stats_count)
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_home_stats: last_watched: %s." % e)
return None
for item in result:
if not item['grandparent_thumb'] or item['grandparent_thumb'] == '':
thumb = item['thumb']
else:
thumb = item['grandparent_thumb']
row = {'row_id': item['id'],
'user': item['user'],
'friendly_name': item['friendly_name'],
'user_id': item['user_id'],
'user_thumb': item['user_thumb'],
'title': item['full_title'],
'rating_key': item['rating_key'],
'thumb': thumb,
'grandparent_thumb': item['grandparent_thumb'],
'last_watch': item['last_watch'],
'player': item['player']
}
last_watched.append(row)
home_stats.append({'stat_id': stat,
'rows': last_watched})
elif stat == 'most_concurrent':
def calc_most_concurrent(title, result):
'''
Function to calculate most concurrent streams
Input: Stat title, SQLite query result
Output: Dict {title, count, started, stopped}
'''
times = []
for item in result:
times.append({'time': str(item['started']) + 'B', 'count': 1})
times.append({'time': str(item['stopped']) + 'A', 'count': -1})
times = sorted(times, key=lambda k: k['time'])
count = 0
last_count = 0
last_start = 0
concurrent = {'title': title,
'count': 0,
'started': None,
'stopped': None
}
for d in times:
if d['count'] == 1:
count += d['count']
if count >= last_count:
last_start = d['time']
else:
if count >= last_count:
last_count = count
concurrent['count'] = count
concurrent['started'] = last_start[:-1]
concurrent['stopped'] = d['time'][:-1]
count += d['count']
return concurrent
most_concurrent = []
try:
base_query = 'SELECT session_history.started, session_history.stopped ' \
'FROM session_history ' \
'JOIN session_history_media_info ON session_history.id = session_history_media_info.id ' \
'WHERE datetime(stopped, "unixepoch", "localtime") ' \
'>= datetime("now", "-%s days", "localtime") ' % time_range
title = 'Concurrent Streams'
query = base_query
result = monitor_db.select(query)
if result:
most_concurrent.append(calc_most_concurrent(title, result))
title = 'Concurrent Transcodes'
query = base_query \
+ 'AND (session_history_media_info.video_decision = "transcode" ' \
'OR session_history_media_info.audio_decision = "transcode") '
result = monitor_db.select(query)
if result:
most_concurrent.append(calc_most_concurrent(title, result))
title = 'Concurrent Direct Streams'
query = base_query \
+ 'AND (session_history_media_info.video_decision != "transcode" ' \
'AND session_history_media_info.audio_decision = "copy") '
result = monitor_db.select(query)
if result:
most_concurrent.append(calc_most_concurrent(title, result))
title = 'Concurrent Direct Plays'
query = base_query \
+ 'AND (session_history_media_info.video_decision = "direct play" ' \
'OR session_history_media_info.audio_decision = "direct play") '
result = monitor_db.select(query)
if result:
most_concurrent.append(calc_most_concurrent(title, result))
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_home_stats: most_concurrent: %s." % e)
return None
home_stats.append({'stat_id': stat,
'rows': most_concurrent})
return home_stats
def get_library_stats(self, library_cards=[]):
monitor_db = database.MonitorDatabase()
library_stats = []
        for section_id in library_cards:
            if section_id.isdigit():
                try:
                    query = 'SELECT section_id, section_name, section_type, thumb, count, parent_count, child_count ' \
                            'FROM library_sections ' \
                            'WHERE section_id = %s ' % section_id
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_library_stats: %s." % e)
return None
for item in result:
library = {'section_id': item['section_id'],
'section_name': item['section_name'],
'section_type': item['section_type'],
'thumb': item['thumb'],
'count': item['count'],
'parent_count': item['parent_count'],
'child_count': item['child_count']
}
library_stats.append(library)
return library_stats
def get_stream_details(self, row_id=None):
monitor_db = database.MonitorDatabase()
if row_id:
query = 'SELECT container, bitrate, video_resolution, width, height, aspect_ratio, video_framerate, ' \
'video_codec, audio_codec, audio_channels, video_decision, transcode_video_codec, transcode_height, ' \
'transcode_width, audio_decision, transcode_audio_codec, transcode_audio_channels, media_type, ' \
'title, grandparent_title ' \
'from session_history_media_info ' \
'join session_history_metadata on session_history_media_info.id = session_history_metadata.id ' \
'where session_history_media_info.id = ?'
result = monitor_db.select(query, args=[row_id])
else:
return None
stream_output = {}
for item in result:
stream_output = {'container': item['container'],
'bitrate': item['bitrate'],
'video_resolution': item['video_resolution'],
'width': item['width'],
'height': item['height'],
'aspect_ratio': item['aspect_ratio'],
'video_framerate': item['video_framerate'],
'video_codec': item['video_codec'],
'audio_codec': item['audio_codec'],
'audio_channels': item['audio_channels'],
'transcode_video_dec': item['video_decision'],
'transcode_video_codec': item['transcode_video_codec'],
'transcode_height': item['transcode_height'],
'transcode_width': item['transcode_width'],
'transcode_audio_dec': item['audio_decision'],
'transcode_audio_codec': item['transcode_audio_codec'],
'transcode_audio_channels': item['transcode_audio_channels'],
'media_type': item['media_type'],
'title': item['title'],
'grandparent_title': item['grandparent_title']
}
return stream_output
def get_metadata_details(self, rating_key):
monitor_db = database.MonitorDatabase()
if rating_key:
query = 'SELECT session_history_metadata.rating_key, session_history_metadata.parent_rating_key, ' \
'session_history_metadata.grandparent_rating_key, session_history_metadata.title, ' \
'session_history_metadata.parent_title, session_history_metadata.grandparent_title, ' \
'session_history_metadata.full_title, library_sections.section_name, ' \
'session_history_metadata.media_index, session_history_metadata.parent_media_index, ' \
'session_history_metadata.section_id, session_history_metadata.thumb, ' \
'session_history_metadata.parent_thumb, session_history_metadata.grandparent_thumb, ' \
'session_history_metadata.art, session_history_metadata.media_type, session_history_metadata.year, ' \
'session_history_metadata.originally_available_at, session_history_metadata.added_at, ' \
'session_history_metadata.updated_at, session_history_metadata.last_viewed_at, ' \
'session_history_metadata.content_rating, session_history_metadata.summary, ' \
'session_history_metadata.tagline, session_history_metadata.rating, session_history_metadata.duration, ' \
'session_history_metadata.guid, session_history_metadata.directors, session_history_metadata.writers, ' \
'session_history_metadata.actors, session_history_metadata.genres, session_history_metadata.studio, ' \
'session_history_media_info.container, session_history_media_info.bitrate, ' \
'session_history_media_info.video_codec, session_history_media_info.video_resolution, ' \
'session_history_media_info.video_framerate, session_history_media_info.audio_codec, ' \
'session_history_media_info.audio_channels ' \
'FROM session_history_metadata ' \
'JOIN library_sections ON session_history_metadata.section_id = library_sections.section_id ' \
'JOIN session_history_media_info ON session_history_metadata.id = session_history_media_info.id ' \
'WHERE session_history_metadata.rating_key = ?'
result = monitor_db.select(query=query, args=[rating_key])
else:
result = []
metadata = {}
for item in result:
directors = item['directors'].split(';') if item['directors'] else []
writers = item['writers'].split(';') if item['writers'] else []
actors = item['actors'].split(';') if item['actors'] else []
genres = item['genres'].split(';') if item['genres'] else []
metadata = {'media_type': item['media_type'],
'rating_key': item['rating_key'],
'parent_rating_key': item['parent_rating_key'],
'grandparent_rating_key': item['grandparent_rating_key'],
'grandparent_title': item['grandparent_title'],
'parent_media_index': item['parent_media_index'],
'parent_title': item['parent_title'],
'media_index': item['media_index'],
'studio': item['studio'],
'title': item['title'],
'content_rating': item['content_rating'],
'summary': item['summary'],
'tagline': item['tagline'],
'rating': item['rating'],
'duration': item['duration'],
'year': item['year'],
'thumb': item['thumb'],
'parent_thumb': item['parent_thumb'],
'grandparent_thumb': item['grandparent_thumb'],
'art': item['art'],
'originally_available_at': item['originally_available_at'],
'added_at': item['added_at'],
'updated_at': item['updated_at'],
'last_viewed_at': item['last_viewed_at'],
'guid': item['guid'],
'writers': writers,
'directors': directors,
'genres': genres,
'actors': actors,
'library_name': item['section_name'],
'section_id': item['section_id'],
'container': item['container'],
'bitrate': item['bitrate'],
'video_codec': item['video_codec'],
'video_resolution': item['video_resolution'],
'video_framerate': item['video_framerate'],
'audio_codec': item['audio_codec'],
'audio_channels': item['audio_channels']
}
return metadata
def get_total_duration(self, custom_where=None):
monitor_db = database.MonitorDatabase()
        # Build the WHERE clause from the custom filter pairs.
        # Note: values are interpolated directly into the SQL string, so they
        # must come from trusted input.
        if custom_where:
            where = 'WHERE ' + ' AND '.join([w[0] + ' = "' + w[1] + '"' for w in custom_where])
else:
where = ''
try:
query = 'SELECT SUM(CASE WHEN stopped > 0 THEN (stopped - started) ELSE 0 END) - ' \
'SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS total_duration ' \
'FROM session_history ' \
'JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
'%s ' % where
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_total_duration: %s." % e)
return None
total_duration = 0
for item in result:
total_duration = item['total_duration']
return total_duration
def get_session_ip(self, session_key=''):
monitor_db = database.MonitorDatabase()
if session_key:
query = 'SELECT ip_address FROM sessions WHERE session_key = %d' % int(session_key)
result = monitor_db.select(query)
else:
return None
ip_address = 'N/A'
for item in result:
ip_address = item['ip_address']
return ip_address
def get_search_query(self, rating_key=''):
monitor_db = database.MonitorDatabase()
if rating_key:
query = 'SELECT rating_key, parent_rating_key, grandparent_rating_key, title, parent_title, grandparent_title, ' \
'media_index, parent_media_index, year, media_type ' \
'FROM session_history_metadata ' \
'WHERE rating_key = ? ' \
'OR parent_rating_key = ? ' \
'OR grandparent_rating_key = ? ' \
'LIMIT 1'
result = monitor_db.select(query=query, args=[rating_key, rating_key, rating_key])
else:
result = []
query = {}
query_string = None
media_type = None
for item in result:
title = item['title']
parent_title = item['parent_title']
grandparent_title = item['grandparent_title']
media_index = item['media_index']
parent_media_index = item['parent_media_index']
year = item['year']
if str(item['rating_key']) == rating_key:
query_string = item['title']
media_type = item['media_type']
elif str(item['parent_rating_key']) == rating_key:
if item['media_type'] == 'episode':
query_string = item['grandparent_title']
media_type = 'season'
elif item['media_type'] == 'track':
query_string = item['parent_title']
media_type = 'album'
elif str(item['grandparent_rating_key']) == rating_key:
if item['media_type'] == 'episode':
query_string = item['grandparent_title']
media_type = 'show'
elif item['media_type'] == 'track':
query_string = item['grandparent_title']
media_type = 'artist'
if query_string and media_type:
query = {'query_string': query_string,
'title': title,
'parent_title': parent_title,
'grandparent_title': grandparent_title,
'media_index': media_index,
'parent_media_index': parent_media_index,
'year': year,
'media_type': media_type,
'rating_key': rating_key
}
else:
return None
return query
def get_rating_keys_list(self, rating_key='', media_type=''):
monitor_db = database.MonitorDatabase()
if media_type == 'movie':
key_list = {0: {'rating_key': int(rating_key)}}
return key_list
if media_type == 'artist' or media_type == 'album' or media_type == 'track':
match_type = 'title'
else:
match_type = 'index'
# Get the grandparent rating key
try:
query = 'SELECT rating_key, parent_rating_key, grandparent_rating_key ' \
'FROM session_history_metadata ' \
'WHERE rating_key = ? ' \
'OR parent_rating_key = ? ' \
'OR grandparent_rating_key = ? ' \
'LIMIT 1'
result = monitor_db.select(query=query, args=[rating_key, rating_key, rating_key])
grandparent_rating_key = result[0]['grandparent_rating_key']
except Exception as e:
logger.warn(u"PlexPy DataFactory :: Unable to execute database query for get_rating_keys_list: %s." % e)
return {}
query = 'SELECT rating_key, parent_rating_key, grandparent_rating_key, title, parent_title, grandparent_title, ' \
'media_index, parent_media_index ' \
'FROM session_history_metadata ' \
'WHERE {0} = ? ' \
'GROUP BY {1} '
        # get grandparent_rating_keys
        grandparents = {}
        grandparent_results = monitor_db.select(query=query.format('grandparent_rating_key', 'grandparent_rating_key'),
                                                args=[grandparent_rating_key])
        for grandparent in grandparent_results:
            # get parent_rating_keys
            parents = {}
            parent_results = monitor_db.select(query=query.format('grandparent_rating_key', 'parent_rating_key'),
                                               args=[grandparent['grandparent_rating_key']])
            for parent in parent_results:
                # get rating_keys
                children = {}
                child_results = monitor_db.select(query=query.format('parent_rating_key', 'rating_key'),
                                                  args=[parent['parent_rating_key']])
                for child in child_results:
                    key = child['media_index']
                    children.update({key: {'rating_key': child['rating_key']}})
                key = parent['parent_media_index'] if match_type == 'index' else parent['parent_title']
                parents.update({key:
                                {'rating_key': parent['parent_rating_key'],
                                 'children': children}
                                })
            key = 0 if match_type == 'index' else grandparent['grandparent_title']
            grandparents.update({key:
                                 {'rating_key': grandparent['grandparent_rating_key'],
                                  'children': parents}
                                 })
key_list = grandparents
return key_list
def delete_session_history_rows(self, row_id=None):
monitor_db = database.MonitorDatabase()
        if row_id and row_id.isdigit():
logger.info(u"PlexPy DataFactory :: Deleting row id %s from the session history database." % row_id)
session_history_del = \
monitor_db.action('DELETE FROM session_history WHERE id = ?', [row_id])
session_history_media_info_del = \
monitor_db.action('DELETE FROM session_history_media_info WHERE id = ?', [row_id])
session_history_metadata_del = \
monitor_db.action('DELETE FROM session_history_metadata WHERE id = ?', [row_id])
return 'Deleted rows %s.' % row_id
else:
return 'Unable to delete rows. Input row not valid.'
def update_metadata(self, old_key_list='', new_key_list='', media_type=''):
from plexpy import pmsconnect
pms_connect = pmsconnect.PmsConnect()
monitor_db = database.MonitorDatabase()
        # helper that maps each old rating key to its new rating key
def get_pairs(old, new):
pairs = {}
for k, v in old.iteritems():
if k in new:
pairs.update({v['rating_key']: new[k]['rating_key']})
if 'children' in old[k]:
pairs.update(get_pairs(old[k]['children'], new[k]['children']))
return pairs
        # map old rating keys to new rating keys
mapping = {}
if old_key_list and new_key_list:
mapping = get_pairs(old_key_list, new_key_list)
if mapping:
logger.info(u"PlexPy DataFactory :: Updating metadata in the database.")
for old_key, new_key in mapping.iteritems():
result = pms_connect.get_metadata_details(new_key)
if result:
metadata = result['metadata']
if metadata['media_type'] == 'show' or metadata['media_type'] == 'artist':
# check grandparent_rating_key (2 tables)
monitor_db.action('UPDATE session_history SET grandparent_rating_key = ? WHERE grandparent_rating_key = ?',
[new_key, old_key])
monitor_db.action('UPDATE session_history_metadata SET grandparent_rating_key = ? WHERE grandparent_rating_key = ?',
[new_key, old_key])
elif metadata['media_type'] == 'season' or metadata['media_type'] == 'album':
# check parent_rating_key (2 tables)
monitor_db.action('UPDATE session_history SET parent_rating_key = ? WHERE parent_rating_key = ?',
[new_key, old_key])
monitor_db.action('UPDATE session_history_metadata SET parent_rating_key = ? WHERE parent_rating_key = ?',
[new_key, old_key])
else:
# check rating_key (2 tables)
monitor_db.action('UPDATE session_history SET rating_key = ? WHERE rating_key = ?',
[new_key, old_key])
monitor_db.action('UPDATE session_history_media_info SET rating_key = ? WHERE rating_key = ?',
[new_key, old_key])
# update session_history_metadata table
self.update_metadata_details(old_key, new_key, metadata)
return 'Updated metadata in database.'
else:
return 'Unable to update metadata in database. No changes were made.'
def update_metadata_details(self, old_rating_key='', new_rating_key='', metadata=None):
if metadata:
# Create full_title
if metadata['media_type'] == 'episode' or metadata['media_type'] == 'track':
full_title = '%s - %s' % (metadata['grandparent_title'], metadata['title'])
else:
full_title = metadata['title']
directors = ";".join(metadata['directors'])
writers = ";".join(metadata['writers'])
actors = ";".join(metadata['actors'])
genres = ";".join(metadata['genres'])
#logger.info(u"PlexPy DataFactory :: Updating metadata in the database for rating key: %s." % new_rating_key)
monitor_db = database.MonitorDatabase()
# Update the session_history_metadata table
query = 'UPDATE session_history_metadata SET rating_key = ?, parent_rating_key = ?, ' \
'grandparent_rating_key = ?, title = ?, parent_title = ?, grandparent_title = ?, full_title = ?, ' \
'media_index = ?, parent_media_index = ?, section_id = ?, thumb = ?, parent_thumb = ?, ' \
'grandparent_thumb = ?, art = ?, media_type = ?, year = ?, originally_available_at = ?, ' \
'added_at = ?, updated_at = ?, last_viewed_at = ?, content_rating = ?, summary = ?, ' \
'tagline = ?, rating = ?, duration = ?, guid = ?, directors = ?, writers = ?, actors = ?, ' \
'genres = ?, studio = ? ' \
'WHERE rating_key = ?'
args = [metadata['rating_key'], metadata['parent_rating_key'], metadata['grandparent_rating_key'],
metadata['title'], metadata['parent_title'], metadata['grandparent_title'], full_title,
metadata['media_index'], metadata['parent_media_index'], metadata['section_id'], metadata['thumb'],
metadata['parent_thumb'], metadata['grandparent_thumb'], metadata['art'], metadata['media_type'],
metadata['year'], metadata['originally_available_at'], metadata['added_at'], metadata['updated_at'],
metadata['last_viewed_at'], metadata['content_rating'], metadata['summary'], metadata['tagline'],
metadata['rating'], metadata['duration'], metadata['guid'], directors, writers, actors, genres,
metadata['studio'],
old_rating_key]
monitor_db.action(query=query, args=args)
|
Hellowlol/plexpy
|
plexpy/datafactory.py
|
Python
|
gpl-3.0
| 57,912
|
from apps.plus_permissions.default_agents import get_admin_user
from apps.plus_permissions.models import GenericReference
def patch():
for ref in GenericReference.objects.filter(creator=None):
ref.creator = get_admin_user()
ref.save()
patch()
|
thehub/hubplus
|
scripts/patch_ref_creator_field.py
|
Python
|
gpl-3.0
| 267
|
import os
import cv2
import sys
import math
import time
import json
import argparse
import numpy as np
from ownLibraries.pathsandnames import PathsAndNames
debug = False
nuevaLocalizacion = ''
# We get the current working git branch
gitBranches = os.popen('git branch').read().replace(' ','').split('\n')
gitVersion = [branch for branch in gitBranches if '*' in branch][0]
def convertirAJSON(lista=None, npy_file='./datos.npy'):
    if lista is None:
        lista = np.load(npy_file)
diccionarioAJson = {'lowResolution':{ 'trafficLightPixels':lista[0],
'departure':lista[1],
'arrival':lista[2],
'right':lista[3],
'left':lista[4],
'angle':lista[5][0],
'regionOfInterest':[]
},
'highResolution':{ 'trafficLightPixels':lista[7],
'departure':[],
'arrival':[],
'right':[],
'left':[],
'angle':0,
'regionOfInterest':lista[6]
},
'location':nuevaLocalizacion,
'gpsCoordinates':{'lat':0,'lon':0,'hei':0},
'debug':debug,
'gitVersion':gitVersion,
'lucam':5,
'notes':''}
    with open(jsonToWrite, 'w') as json_file:
        json.dump(diccionarioAJson, json_file)
print('Archivo guardado como json exitosamente')
directorioDeVideos = PathsAndNames.directorioDeVideos+'/'
jsonToWrite = PathsAndNames.folderDeInstalacion+'/datos.json'
parser = argparse.ArgumentParser(description='Add newData')
parser.add_argument('-l', '--newLocation', default = None, type=str, help="Add new subfolder")
parser.add_argument('-s', '--source', default = None, type=str, help="Add input for debug")
parser.add_argument('-c', '--convertir', default = None, type=str, help="justConvertFile")
args = parser.parse_args()
if not os.path.exists(PathsAndNames.folderDeInstalacion):
os.makedirs(PathsAndNames.folderDeInstalacion)
if args.newLocation != None:
nuevaLocalizacion = args.newLocation
print('Trabajando para '+nuevaLocalizacion)
else:
nuevaLocalizacion = 'debug'
print('Trabajando en modo debug')
if args.source != None:
nameSourceVideo = args.source
jsonToWrite = jsonToWrite[:-10]+nameSourceVideo[:-3]+'json'
archivo = args.source
else:
nameSourceVideo = 0
archivo = 'datos.mp4'
help_message= '''Analysis zone selection
Includes the traffic light selection on the high resolution image
'''
listaSemFinal=[]
listaCorte=[]
listaSem=[]
lista=[]
listaAux=[]
listaAux1=[]
listaAux2=[]
listaAux3=[]
listaSemaAlta=[]
listCorteAltaRes=[]
###############Get indices#################
def obtenerIndicesSemaforo(poligono640):
punto0 = poligono640[0]
punto1 = poligono640[1]
punto2 = poligono640[2]
punto3 = poligono640[3]
vectorHorizontal = punto3 - punto0
vectorVertical = punto1 - punto0
pasoHorizontal = vectorHorizontal/8
pasoVertical = vectorVertical/24
indices = []
for j in range(24):
for i in range(8):
indices.append((punto0+i*pasoHorizontal+j*pasoVertical).tolist())
#print('len of indices', len(indices))
#print('single index', indices[0])
#indices = [[round(x[0]),round(x[1])] for x in indices]
return indices
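# Illustrative example (hypothetical polygon): for the axis-aligned rectangle
# np.array([[0,0],[0,24],[8,24],[8,0]]) the steps are pasoHorizontal = [1, 0]
# and pasoVertical = [0, 1], so the function returns the 8x24 = 192 grid
# points [i, j] for i in 0..7 and j in 0..23.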
###########Calculate new Points############
def transformIma(lista):
x2=lista[0][0]
y2=lista[0][1]
x3=lista[1][0]
y3=lista[1][1]
distancia=math.sqrt((x2-x3)**2+(y2-y3)**2)
altura=distancia/3
if y2>y3:
#print('caso1')
anguloInicial=math.asin((y2-y3)/distancia)
anguloInicialGrados=anguloInicial*180/(math.pi)
anguloGrados=180-90-anguloInicialGrados
anguloRadians=anguloGrados*(math.pi)/180
y=altura*math.sin(anguloRadians)
x=altura*math.cos(anguloRadians)
x1=int(x2-x)
y1_0=int(y2-y)
y1=int(y1_0-altura)
x4=int(x3-x)
y4_0=int(y3-y)
y4=int(y4_0-altura)
poligon=[(x1,y1_0),(x2,y2),(x3,y3),(x4,y4_0)]
poligonAdd=[(x1,y1),(x2,y2),(x3,y3),(x4,y4)]
if y3==y2:
#print('caso2')
x1=x2
y1_0=int(altura)
y1=int(2*altura)
x4=x3
y4_0=int(altura)
y4=int(2*altura)
poligon=[(x1,y1_0),(x2,y2),(x3,y3),(x4,y4_0)]
poligonAdd=[(x1,y1),(x2,y2),(x3,y3),(x4,y4)]
if y3>y2:
#print('caso3')
anguloInicial=math.asin((y3-y2)/distancia)
anguloInicialGrados=anguloInicial*180/(math.pi)
anguloGrados=180-90-anguloInicialGrados
anguloRadians=anguloGrados*(math.pi)/180
y=altura*math.sin(anguloRadians)
x=altura*math.cos(anguloRadians)
x1=int(x2+x)
y1_0=int(y2-y)
y1=int(y1_0-altura)
x4=int(x3+x)
y4_0=int(y3-y)
y4=int(y4_0-altura)
poligon=[(x1,y1_0),(x2,y2),(x3,y3),(x4,y4_0)]
poligonAdd=[(x1,y1),(x2,y2),(x3,y3),(x4,y4)]
return poligon, poligonAdd
###### Get points with the mouse click######
def get_PointsSemaforZona(event,x,y,flags,param):
global frame
if event == cv2.EVENT_LBUTTONDOWN:
listaCorte.append((x,y))
if len(listaCorte)!= 0:
cv2.circle(frame, (x,y),2,(0,255,0),-1)
cv2.imshow('semaforo_Zona',frame)
if len(listaCorte)== 2:
frame=cv2.rectangle(frame,listaCorte[0],listaCorte[1],(0,0,255),3)
cv2.imshow('semaforo_Zona',frame)
########## Traffic light Selection##########
def get_PointsSema(event,x,y,flags,param):
global imag
if event == cv2.EVENT_LBUTTONDOWN:
listaSem.append((x,y))
if len(listaSem)!= 0:
cv2.circle(imag, (x,y),2,(0,0,255),-1)
cv2.imshow('semaforo',imag)
##########################################
def get_Points(event,x,y,flags,param):
global frame
if event == cv2.EVENT_LBUTTONDOWN:
listaAux.append((x,y))
if len(lista)>0:
listaAux1.append((x//2,y//2))
else:
listaAux1.append((x//2,y//2))
if len(listaAux)!= 0:
cv2.circle(frame, (x,y),2,(0,255,255),-1)
cv2.imshow('First_Frame',frame)
if len(listaAux)>= 2:
        cv2.line(frame,listaAux[-1],listaAux[-2],(0,255,255),1)
if len(listaAux)>= 4:
        cv2.line(frame,listaAux[-1],listaAux[0],(0,255,255),1)
cv2.imshow('First_Frame',frame)
############High resolution#############
def get_BigRectangle(event,x,y,flags,param):
global frame
if event == cv2.EVENT_LBUTTONDOWN:
listaAux2.append((x,y))
if len(listaAux2)<3:
listCorteAltaRes.append((x*4,y*4))
cv2.circle(frame, (x,y),3,(0,255,0),-1)
cv2.imshow('FrameDeAltaResolucion',frame)
if len(listaAux2)==2:
lista.append((listCorteAltaRes))
frame=cv2.rectangle(frame,listaAux2[0],listaAux2[1],(0,0,255),3)
cv2.imshow('FrameDeAltaResolucion',frame)
def get_Big_Sem_Polig(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDOWN:
global frame
listaAux2.append((x,y))
if len(listaAux2)!= 0:
cv2.circle(frame, (x,y),2,(0,255,0),-1)
cv2.imshow('FrameResolucion',frame)
if len(listaAux2)== 2:
frame=cv2.rectangle(frame,listaAux2[0],listaAux2[1],(0,0,255),2)
cv2.imshow('FrameResolucion',frame)
###############################################################
def get_TrafficLight(event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDOWN:
listaAux3.append((x,y))
if len(listaAux3)<5:
listaSemaAlta.append((Const_X*(Punto_X0+const_x_zoom*x),Const_Y*(Punto_Y0+const_y_zoom*y)))
cv2.circle(frame, (x,y),2,(0,250,255),-1)
cv2.imshow('FrameToTrafficLight',frame)
if len(listaAux3)==4:
indice_Alta=obtenerIndicesSemaforo(np.array(listaSemaAlta))
print('listaSemaforo..'+str(listaSemaAlta))
print('Puntos obtenidos: ',indice_Alta)
print(indice)
lista.append((indice_Alta))
print('Press -q- to Save and Exit')
##################################################################3####
#
def calcPoints(imag,listaS):
imag=imag
ancho=imag.shape[1]
largo=imag.shape[0]
x1=listaS[0]
y1=listaS[1]
x=listaCorte[0][0]+(x1*ancho)//640
y=listaCorte[0][1]+(y1*largo)//480
return x, y
if __name__ == '__main__':
print (help_message)
try:
cap=cv2.VideoCapture(directorioDeVideos+nameSourceVideo)
for i in range(100):
ret, frame=cap.read()
frame=cv2.resize(frame,(640,480))
val=1
except:
val=0
if val==0:
print('Accediendo a imagen de flujo...')
try:
frame=cv2.imread(PathsAndNames.folderDeInstalacion+'/flujo.jpg')
frame=cv2.resize(frame,(640,480))
except:
print('No se encontro imagen de flujo')
print('Accediendo a cámara...')
cap=cv2.VideoCapture(0)
for i in range(100):
ret, frame=cap.read()
frame=cv2.resize(frame,(640,480))
frame2=frame.copy()
fram=frame.copy()
print('select traffic light zone, press -z- to zoom ')
cv2.namedWindow('semaforo_Zona')
cv2.setMouseCallback('semaforo_Zona', get_PointsSemaforZona)
while True:
        cv2.imshow('semaforo_Zona',frame)
keyPress = cv2.waitKey()
if keyPress == ord('z'):
break
if keyPress == ord('q'):
print ('Interrumpido...')
break
cv2.destroyAllWindows()
######################################
    ### Traffic light selection:
imag1=frame2[listaCorte[0][1]:listaCorte[1][1],listaCorte[0][0]:listaCorte[1][0]]
imag=imag1.copy()
imag=cv2.resize(imag,(640,480))
cv2.namedWindow('semaforo')
cv2.setMouseCallback('semaforo', get_PointsSema)
print('Select points and press -q- to accept the traffic light points and exit')
while(1):
cv2.imshow('semaforo',imag)
keyPress = cv2.waitKey()
if keyPress&0xFF==ord('q'):
#print('listaSemaforo..'+str(listaSem))
rang=len(listaSem)
            ## Transforming back to source-image coordinates
for i in range(0,len(listaSem)):
x,y=calcPoints(imag1,listaSem[i])
listaSemFinal.append((x,y))
####-----------------------------------------
indice=obtenerIndicesSemaforo(np.array(listaSemFinal))
print('listaSemaforo..'+str(listaSemFinal))
print(indice)
lista.append((indice))
break
cv2.destroyAllWindows()
frame=fram.copy()
overlay=frame.copy()
cv2.namedWindow('First_Frame')
cv2.setMouseCallback('First_Frame', get_Points)
while True:
print('press ESC to not accept and -y- to accept')
cv2.imshow('First_Frame',frame)
keyPress = cv2.waitKey()
if keyPress == ord('y'):
            #### listaAux1 = listaAux scaled down by half
            #### listaAux1: holds values for 320x240; listaAux: values for 640x480
if len(listaAux)==2:
pol1,polAdd=transformIma(listaAux)
pol320,polAdd320=transformIma(listaAux1)
lista.append((polAdd320))
vrx=np.array([[pol1]],np.int32)
pts=vrx.reshape((-1,1,2))
cv2.polylines(frame,[pts],True,(255,0,0))
#print(pol1)
########################
vrx=np.array([[polAdd]],np.int32)
pts=vrx.reshape((-1,1,2))
cv2.polylines(frame,[pts],True,(0,0,255))
#print(polAdd)
listaAux=[]
listaAux1=[]
cv2.imshow('First_Frame',frame)
if len(listaAux)>2:
print('_____Data accept..')
print('*Select two points next press -a- to obtain Angle')
lista.append((listaAux1))
vrx=np.array([[listaAux]],np.int32)
pts=vrx.reshape((-1,1,2))
cv2.polylines(frame,[pts],True,(0,0,255))
cv2.imshow('First_Frame',frame)
#print('lista: '+str(lista))
listaAux=[]
listaAux1=[]
#print('ListaAux Removed...'+ str(listaAux))
overlay=frame.copy()
if keyPress == 27:
print('_____Data not accept..')
print('*Select two points next press -a- to obtain Angle')
#print('lista no append: '+str(lista))
listaAux=[]
listaAux1=[]
#print('ListaAux Removed...'+ str(listaAux))
frame=overlay.copy()
cv2.imshow('First_Frame',frame)
if keyPress == ord('a'):
vrx=np.array([[listaSemFinal]],np.int32)
pts=vrx.reshape((-1,1,2))
cv2.polylines(frame,[pts],True,(0,255,255))
cv2.line(frame,(listaAux[0]),(listaAux[1]),(255,0,0),2)
##Angle:
x1=listaAux[0][0]
y1=listaAux[0][1]
x2=listaAux[1][0]
y2=listaAux[1][1]
            nume=-(y2-y1) # negated because image y-coordinates grow downwards
deno=x2-x1
if deno ==0:
alpha = np.sign(nume)*90
else:
alpha=math.atan(nume/deno)
alpha=alpha*180/(math.pi)
if (deno<0):
alpha=alpha+180
#print('angule:.'+str(alpha))
lista.append([int(alpha)])
print('Press -q- to go a Full Resolution')
if keyPress&0xFF==ord('q'):
#print ('lista: ---' +str(lista))
break
cv2.destroyAllWindows()
#cap.release()
#Capture of High Resolution
try:
cap=cv2.VideoCapture(directorioDeVideos+nameSourceVideo)
#cap=cv2.VideoCapture('officialTrialVideos/sar.mp4')
ret, frame1=cap.read()
except:
print('accediendo a imagen de placas...')
try:
frame1=cv2.imread(PathsAndNames.folderDeInstalacion+'/placa.jpg')
frame=cv2.resize(frame1,(640,480))
print('Se cargo imagen de Alta')
imag=1
except:
print('No se encontro imagen...')
imag=0
if imag==0:
try:
cap=cv2.VideoCapture(1)
cap.set(3,2560)
cap.set(4,1920)
ret, frame1=cap.read()
frame=cv2.resize(frame1,(640,480))
print('Se cargo video(1)')
except:
try:
cap=cv2.VideoCapture(0)
ret, frame1=cap.read()
frame=cv2.resize(frame1,(640,480))
print('Se cargo video(0)')
except:
frame=np.zeros((480,640),np.uint8)
print('Se creo Mascara')
cv2.namedWindow('FrameDeAltaResolucion')
cv2.setMouseCallback('FrameDeAltaResolucion', get_BigRectangle)
print('Select 2 points Press "q" to cut image')
while(len(listaAux2)<3):
cv2.imshow('FrameDeAltaResolucion',frame)
keyPress = cv2.waitKey()
if keyPress&0xFF==ord('q'):
print('Full Resolution saved!!')
break
cv2.destroyAllWindows()
print('select 2 points and Press "z" to zoom on traffic Light zone')
frame=frame1[listCorteAltaRes[0][1]:listCorteAltaRes[1][1],listCorteAltaRes[0][0]:listCorteAltaRes[1][0]]
imagenCortada=frame.copy()
#print(frame1.shape)
#print(frame.shape)
#print(listCorteAltaRes)
Const_X=frame.shape[1]/640
Const_Y=frame.shape[0]/480
frame=cv2.resize(frame,(640,480))
listaAux2=[]
cv2.namedWindow('FrameResolucion')
cv2.setMouseCallback('FrameResolucion', get_Big_Sem_Polig)
while(1):
cv2.imshow('FrameResolucion',frame)
keyPress = cv2.waitKey()
if keyPress&0xFF==ord('z'):
frame=frame[listaAux2[0][1]:listaAux2[1][1],listaAux2[0][0]:listaAux2[1][0]]
Punto_X0=listaAux2[0][0]
Punto_Y0=listaAux2[0][1]
const_y_zoom=frame.shape[0]/240
const_x_zoom=frame.shape[1]/320
#print('Full Resolution TRAFFIC LIGHT saved!!')
break
cv2.destroyAllWindows()
frame=cv2.resize(frame,(320,240))
cv2.namedWindow('FrameToTrafficLight')
cv2.setMouseCallback('FrameToTrafficLight', get_TrafficLight)
print('select traffic Light points and Press "q" to exit and save')
while(1):
cv2.imshow('FrameToTrafficLight',frame)
keyPress = cv2.waitKey()
if keyPress&0xFF==ord('q'):
print('Full Resolution TRAFFIC LIGHT saved!!')
break
cv2.destroyAllWindows()
while(1):
vrx=np.array([[listaSemaAlta]],np.int32)
pts=vrx.reshape((-1,1,2))
cv2.polylines(imagenCortada,[pts],True,(0,255,255))
for punto in lista[-1]:
x = round(punto[0])
y = round(punto[1])
cv2.circle(imagenCortada, (x,y),2,(0,255,0),-1)
cv2.imshow('imagen Final',cv2.resize(imagenCortada,(640,480)))
keyPress = cv2.waitKey()
if keyPress&0xFF==ord('q'):
print('Done!!')
break
print('Standarizing data:')
convertirAJSON(lista)
cv2.destroyAllWindows()
|
AlvaroRQ/prototipo
|
install.py
|
Python
|
gpl-3.0
| 14,799
|
# You have been given an array A consisting of N integers. All the elements in this array A are unique. You have to
# answer some queries based on the elements of this array. Each query will consist of a single integer x. You need to
# print the rank based position of this element in this array considering that the array is 1 indexed. The rank
# based position of an element in an array is its position in the array when the array has been sorted in ascending order.
#
# Note: It is guaranteed that all the elements in this array are unique and for each x belonging to a query, value 'x'
# shall exist in the array
#
# Input Format
#
# The first line consists of a single integer N denoting the size of array A. The next line contains N unique integers,
# denoting the content of array A. The next line contains a single integer q denoting the number of queries. Each of
# the next q lines contains a single integer x denoting the element whose rank based position needs to be printed.
#
# Output Format
#
# You need to print q integers denoting the answer to each query.
#
# Constraints
#
# 1 ≤ N ≤ 10^5
# 1 ≤ A[i] ≤ 10^9
# 1 ≤ q ≤ 10^5
# 1 ≤ x ≤ 10^9
#
# SAMPLE INPUT
# 5
# 1 2 3 4 5
# 5
# 1
# 2
# 3
# 4
# 5
#
# SAMPLE OUTPUT
# 1
# 2
# 3
# 4
# 5
n = int(input())
array = [int(i) for i in input().split()]
array.insert(0, 0)
array.sort()
q = int(input())
def binarySearch(low, high, element):
while(low <= high):
mid = (low + high) // 2
if array[mid] == element:
return mid
elif array[mid] < element:
low = mid + 1
else:
high = mid - 1
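    return -1  # defensive fallback; the problem guarantees every x is present
# Each query runs in O(log N) after the one-time O(N log N) sort.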
for i in range(q):
number = int(input())
    print(binarySearch(0, len(array) - 1, number))
|
OmkarPathak/Python-Programs
|
CompetitiveProgramming/HackerEarth/Algorithms/Searching/P06_RankIt.py
|
Python
|
gpl-3.0
| 1,665
|
# Copyright 2015 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: data_notes
:platform: Unix
:synopsis: A module containing extended doc strings for the data module.
.. moduleauthor:: Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
def _set_preview_note():
"""
Each ``preview_list`` element should be of the form
``start:stop:step:chunk``, where ``stop``, ``step`` and ``chunk`` are
optional (defaults: ``stop``=``start``+ 1, ``step``= 1, ``chunk`` = 1)
but must be given in that order.
.. note::
**start:stop[:step]**
represents the set of indices specified by:
>>> indices = range(start, stop[, step])
For more information see :func:`range`
**start:stop:step:chunk (chunk > 1)**
represents the set of indices specified by:
>>> a = np.tile(np.arange(start, stop, step), (chunk, 1))
>>> b = np.transpose(np.tile(np.arange(chunk)-chunk/2, \
(a.shape[1], 1)))
>>> indices = np.ravel(np.transpose(a + b))
Chunk indicates how many values to take around each value in
``range(start, stop, step)``. It is only available for slicing
dimensions.
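        For example (illustrative values), the entry ``1:9:4:2`` expands to:
        >>> indices = [0, 1, 4, 5]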
.. warning:: If any indices are out of range (or negative)
then the list is invalid. When chunk > 1, new start and
end values will be:
>>> new_start = start - int(chunk/2)
>>> new_end = range(start, stop, step)[-1] + \
(step - int(chunk/2))
**accepted values**:
Each entry is executed using :func:`eval` so simple formulas are\
allowed and may contain the following keywords:
* ``:`` is a simplification for 0:end:1:1 (all values)
* ``mid`` is int(shape[dim]/2)-1
* ``end`` is shape[dim]
"""
def image_key():
"""
This is a helper function to be used after :meth:`savu.data.\
data_structures.data_create.DataCreate.create_dataset`,
>>> out_dataset[0].create_dataset(in_dataset[0])
>>> out_dataset[0].trim_output_data(in_dataset[0], image_key=0)
if in_dataset[0] is a plugin input dataset with an image_key and 0 is
the data index.
"""
def _create():
"""
.. note:: **Creating a dataset**
Each new dataset requires the following information:
* ``shape``
* ``axis_labels``
* ``patterns``
This function can be used to setup the required information in one
of two ways:
1. Passing a ``Data`` object as the only argument: All required
        information is copied from this data object. For example,
>>> out_dataset[0].create_dataset(in_dataset[0])
2. Passing kwargs: ``shape`` and ``axis_labels`` are required
(see above for other optional arguments). For example,
>>> out_dataset[0].create_dataset(axis_labels=labels, \
shape=new_shape)
.. warning:: If ``pattern`` keyword is not used, patterns must be added
after :meth:`~savu.data.data_structures.data_create.DataCreate.\
create_dataset` by calling :func:`~savu.data.data_structures.data.Data.\
add_pattern`.
"""
def _shape():
"""
.. note::
``shape`` keyword argument
Options to pass are:
1. Data object: Copy shape from the Data object.
>>> out_dataset[0].create_dataset(axis_labels=labels, \
shape=in_dataset[0])
        2. tuple: Define shape explicitly.
>>> out_dataset[0].create_dataset(axis_labels=labels, \
shape=(10, 20, 30))
"""
def axis_labels():
"""
.. note::
``axis_labels`` keyword argument
Options to pass are:
1. Data object: Copy all labels from the Data object.
>>> out_dataset[0].create_dataset(axis_labels=in_dataset[0], \
shape=new_shape)
2. {Data_obj: list}: Copy labels from the Data object and then
remove or insert.
* To remove dimensions: list_entry = 'dim'. For example, to
remove the first and last axis_labels from the copied list:
>>> out_dataset[0].create_dataset(axis_labels=\
{in_dataset[0]: ['1', '-1']), shape=new_shape})
* To add/replace dimensions: list_entry = 'dim.name.unit'.
>>> out_dataset[0].create_dataset(axis_labels={in_dataset[0]: \
['2.det_x.pixel', '3.det_y.pixel']}, shape=new_shape)
* To insert dimensions: list_entry = '~dim.name.unit'.
>>> out_dataset[0].create_dataset(axis_labels={in_dataset[0]: \
['~2.det_x.pixel', '~3.det_y.pixel']}, shape=new_shape)
(or a combination, where each successive step is applied after
the previous changes have been made.)
3. list: Where each element is of the form 'name.unit'.
>>> out_dataset[0].create_dataset(axis_labels=['rotation.deg',\
'det_x.pixel', 'det_y.pixel'], shape=new_shape)
"""
def patterns():
"""
.. note::
``patterns`` keyword argument
Options to pass are:
1. Data object: Copy all patterns from the Data object.
>>> out_dataset[0].create_dataset(axis_labels=labels, \
shape=new_shape, patterns=in_dataset[0])
2. {Data_obj: list}: Copy only the patterns given in the list
from the Data object.
* Copy the patterns: list_entry = 'name'
        >>> out_dataset[0].create_dataset(axis_labels=labels, \
shape=new_shape, patterns={in_dataset[0], ['SINOGRAM', 'PROJECTION']})
* Copy patterns but remove dimensions: list_entry = \
'name1.r1,r2...':
        >>> out_dataset[0].create_dataset(axis_labels=labels, \
shape=new_shape, patterns={in_dataset[0], ['SINOGRAM.1', 'PROJECTION.1']})
* Copy ALL patterns but remove dimensions: list_entry = \
'\*.r1,r2...':
        >>> out_dataset[0].create_dataset(axis_labels=labels, \
shape=new_shape, patterns={in_dataset[0], '*.0'})
"""
def _padding():
"""
    Either 'dim.pad', 'dim.after.pad' or 'dim.before.pad', where 'dim' and\
    'pad' are integers and give the dimension to pad and the pad \
    amount respectively. The keywords 'before' and 'after' specify padding\
    'before' or 'after' the original dimension index (if neither is\
    specified, both directions will be padded). """
|
FedeMPouzols/Savu
|
savu/data/data_structures/data_notes.py
|
Python
|
gpl-3.0
| 7,157
|
import os
import inspect
import sys
class BlockStore:
def __init__(self, input_file, block_size, output_dir):
self.input_file = input_file
self.block_size = block_size
file_size = os.stat(input_file).st_size
print 'file_size: %d' % file_size
#Should handle this later on.
if (file_size < block_size):
print 'File provided is smaller than the deduplication block size.'
sys.exit(0)
if not (os.path.isdir(output_dir)):
print 'Output directory "%s" does not exist. Will create..' % output_dir
os.makedirs(output_dir)
try:
self.file_fp = os.open(self.input_file, os.O_DIRECT | os.O_RDONLY)
except Exception as e:
frame = inspect.currentframe()
info = inspect.getframeinfo(frame)
            print '\t[fopen: an %s exception occurred | line: %d]' % (type(e).__name__, info.lineno)
sys.exit(0)
def get_sync(self, byte_offset=0):
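        # NOTE: byte_offset is currently unused; os.read() continues from the
        # descriptor's current position (opened with O_DIRECT in __init__)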
block = ''
try:
block = os.read(self.file_fp, self.block_size)
except Exception as e:
frame = inspect.currentframe()
info = inspect.getframeinfo(frame)
            print '\t[read: an %s exception occurred | line: %d]' % (type(e).__name__, info.lineno)
            sys.exit(0)
return block
|
spapageo0x01/dioskrS
|
layer_block.py
|
Python
|
gpl-3.0
| 1,354
|
"""geonature samples
Revision ID: 3d0bf4ee67d1
Create Date: 2021-09-27 18:00:45.818766
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3d0bf4ee67d1'
down_revision = None
branch_labels = ('geonature-samples',)
depends_on = (
'geonature',
)
def upgrade():
op.execute("""
INSERT INTO gn_meta.sinp_datatype_protocols (
unique_protocol_id,
protocol_name,
protocol_desc,
id_nomenclature_protocol_type,
protocol_url)
VALUES (
'9ed37cb1-803b-4eec-9ecd-31880475bbe9',
'hors protocole',
'observation réalisées hors protocole',
ref_nomenclatures.get_id_nomenclature('TYPE_PROTOCOLE','1'),
null)
""")
def downgrade():
op.execute("""
DELETE FROM gn_meta.sinp_datatype_protocols
WHERE unique_protocol_id = '9ed37cb1-803b-4eec-9ecd-31880475bbe9'
""")
|
PnX-SI/GeoNature
|
backend/geonature/migrations/versions/3d0bf4ee67d1_geonature_samples.py
|
Python
|
gpl-3.0
| 923
|
'''
Created on Dec 5, 2016
@author: paveenju
'''
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib2tikz import save as tikz_save
import utils.functions as fn
if __name__ == '__main__':
pass
def axes():
plt.axhline(0, alpha=.1)
plt.axvline(0, alpha=.1)
# input variables
dL = np.array([0.4, 0.6, 0.9])
P1, P2 = [1.0, 0.0, 0], [-1.0, 0.0, 0]
d = np.linalg.norm(np.mat(P1)-np.mat(P2))
c = d/2
A = dL/2
# data generation
x_p = np.linspace(-3, 3, 100)
y_p = np.linspace(-3, 3, 100)
x_p, y_p = np.meshgrid(x_p, y_p)
x, y, h, k = fn.linear_transformation(P1, P2, x_p, y_p)
# matplotlib
mpl.rcParams['lines.color'] = 'k'
mpl.rcParams['axes.prop_cycle'] = mpl.cycler('color', ['k'])
for a in A:
plt.contour(x_p, y_p,
((x**2/a**2) - (y**2/(c**2-a**2)) - 1),
[0], colors='b')
axes()
plt.annotate(r'$\tau_1$', xy=(0, 0), xytext=(0.67, 2.8), fontsize=20)
plt.annotate(r'$\tau_2$', xy=(0, 0), xytext=(0.9, 2.5), fontsize=20)
plt.annotate(r'$\tau_3$', xy=(0, 0), xytext=(1.2, 2.1), fontsize=20)
plt.text(1.75, 0.5, r'$\tau_1=0.4$' + '\n' + r'$\tau_2=0.6$' + '\n' + r'$\tau_3=0.9$',
bbox={'facecolor':'white', 'alpha':0.5, 'pad':10},
fontsize=20)
plt.plot(P1[0], P1[1], 'xr', mew=5, ms=15)
plt.plot(P2[0], P2[1], 'xr', mew=5, ms=15)
#plt.show()
tikz_save('../output/figure2_2a.tex')
|
paveenju/mlat-sim
|
main/figure2_2a.py
|
Python
|
gpl-3.0
| 1,441
|
#----------------------------------------------------------------------
# Copyright 2012, 2013 Arndt Droullier, Nive GmbH. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#----------------------------------------------------------------------
__doc__ = """
"""
from nive.i18n import _
from nive.definitions import FieldConf, ViewConf, ViewModuleConf, Conf
# view module definition ------------------------------------------------------------------
#@nive_module
configuration = ViewModuleConf(
id = "userview",
name = _(u"User signup"),
static = "nive.userdb.userview:static",
containment = "nive.userdb.app.UserDB",
context = "nive.userdb.root.root",
view = "nive.userdb.userview.view.UserView",
templates = "nive.userdb.userview:",
permission = "view"
)
t = configuration.templates
configuration.views = [
# User Views
ViewConf(name="login", attr="login", renderer=t+"loginpage.pt"),
ViewConf(name="signup", attr="create", renderer=t+"signup.pt", permission="signup"),
ViewConf(name="update", attr="update", renderer=t+"update.pt", permission="updateuser"),
ViewConf(name="resetpass",attr="resetpass",renderer=t+"resetpass.pt"),
ViewConf(name="logout", attr="logout"),
# disabled
#ViewConf(name="mailpass", attr="mailpass", renderer=t+"mailpass.pt"),
]
# view and form implementation ------------------------------------------------------------------
from nive.views import BaseView, Unauthorized, Mail
from nive.forms import ObjectForm
class UserForm(ObjectForm):
"""
Extended User form
"""
def __init__(self, view=None, loadFromType=None, context=None, request=None, app=None, **kw):
ObjectForm.__init__(self, view=view, loadFromType=loadFromType)
self.actions = [
Conf(id="default", method="StartForm", name=_(u"Initialize"), hidden=True),
Conf(id="defaultEdit",method="LoadUser", name=_(u"Initialize"), hidden=True),
Conf(id="create", method="AddUser", name=_(u"Signup"), hidden=False, options={"renderSuccess":False}),
Conf(id="edit", method="Update", name=_(u"Confirm"), hidden=False),
Conf(id="mailpass", method="MailPass", name=_(u"Mail password"), hidden=False),
Conf(id="resetpass", method="ResetPass", name=_(u"Reset password"), hidden=False),
Conf(id="login", method="Login", name=_(u"Login"), hidden=False),
]
self.subsets = {
"create": {"fields": ["name", "password", "email", "surname", "lastname"],
"actions": ["create"],
"defaultAction": "default"},
"create2":{"fields": ["name", "email"],
"actions": ["create"],
"defaultAction": "default"},
"edit": {"fields": ["email",
FieldConf(id="password", name=_("Password"), datatype="password", required=False, settings={"update": True}),
"surname", "lastname"],
"actions": ["defaultEdit", "edit"],
"defaultAction": "defaultEdit"},
"login": {"fields": ["name", FieldConf(id="password", name=_("Password"), datatype="password", settings={"single": True})],
"actions": ["login"],
"defaultAction": "default"},
"mailpass":{"fields": ["email"],
"actions": ["mailpass"],
"defaultAction": "default"},
"resetpass":{"fields": ["email"],
"actions": ["resetpass"],
"defaultAction": "default"},
}
self.activate = 1
self.generatePW = 0
self.notify = True
self.mail = None
self.mailpass = None
self.groups = ""
self.css_class = "smallform"
def AddUser(self, action, **kw):
"""
Form action: safely add a user
"""
msgs = []
result,data,errors = self.Validate(self.request)
if result:
result, msgs = self.context.AddUser(data,
activate=self.activate,
generatePW=self.generatePW,
mail=self.mail,
groups=self.groups,
notify=self.notify,
currentUser=self.view.User())
return self._FinishFormProcessing(result, data, msgs, errors, **kw)
def LoadUser(self, action, **kw):
"""
Initially load data from obj.
context = obj
"""
user = self.view.User()
if not user:
raise Unauthorized, "User not found."
data = self.LoadObjData(user)
try:
del data["password"]
        except KeyError:
pass
        return data is not None, self.Render(data)
def Update(self, action, **kw):
"""
Form action: safely update a user
"""
user = self.view.User()
if not user:
raise Unauthorized, "User not found."
msgs = []
result,data,errors = self.Validate(self.request)
if result:
uobj = self.context.LookupUser(id=user.id)
result = uobj.SecureUpdate(data, user)
if result:
msgs.append(_(u"OK"))
return self._FinishFormProcessing(result, data, msgs, errors, **kw)
def Login(self, action, **kw):
"""
Form action: user login
"""
redirectSuccess = kw.get("redirectSuccess")
data = self.GetFormValues(self.request)
user, msgs = self.context.Login(data.get("name"), data.get("password"), 0)
if user:
self.context.app.RememberLogin(self.request, user.data.get("name"))
if self.view and redirectSuccess:
self.view.Redirect(redirectSuccess)
return
errors=None
return user, self.Render(data, msgs=msgs, errors=errors)
def MailPass(self, action, **kw):
"""
"""
return self.ResetPass(action, createNewPasswd=False, **kw)
def ResetPass(self, action, createNewPasswd=True, **kw):
"""
"""
#result, data, e = self.Validate(self.request)
data = self.GetFormValues(self.request)
result, msgs = self.context.MailUserPass(email=data.get("email"), mailtmpl=self.mailpass, createNewPasswd=createNewPasswd, currentUser=self.view.User())
if result:
data = {}
return self._FinishFormProcessing(result, data, msgs, None, **kw)
class UserView(BaseView):
def __init__(self, context, request):
BaseView.__init__(self, context, request)
self.form = UserForm(view=self, loadFromType="user")
self.form.groups = ""
self.publicSignup = False
def create(self):
self.form.activate=1
self.form.generatePW=0
self.form.Setup(subset="create")
return self._render()
def createNotActive(self):
self.form.activate=0
self.form.generatePW=0
self.form.Setup(subset="create")
return self._render()
def createPassword(self):
self.form.activate=1
self.form.generatePW=1
self.form.Setup(subset="create2")
return self._render()
def update(self):
user=self.User()
if user and user.id == 0:
return {u"content": _(u"Your current user can only be edited on file system level."), u"result": False, u"head": self.form.HTMLHead()}
self.form.Setup(subset="edit")
try:
result, data, action = self.form.Process()
return {u"content": data, u"result": result, u"head": self.form.HTMLHead()}
except Unauthorized:
return {u"content": _(u"User not found"), u"result": False, u"head": self.form.HTMLHead()}
def mailpass(self):
self.form.startEmpty = True
self.form.mail = Mail(_(u"Your password"), "nive.userdb:userview/mailpassmail.pt")
self.form.Setup(subset="mailpass")
return self._render()
def resetpass(self):
self.form.startEmpty = True
self.form.mail = Mail(_(u"Your new password"), "nive.userdb:userview/resetpassmail.pt")
self.form.Setup(subset="resetpass")
return self._render()
def login(self):
self.form.Setup(subset="login")
user = self.UserName()
if not user:
self.form.startEmpty = True
#self.form.renderOneColumn = True
redirect = self.GetFormValue(u"redirect")
if not redirect:
try:
redirect = self.context.app.portal.configuration.loginSuccessUrl
except:
redirect = self.request.url
result, data, action = self.form.Process(redirectSuccess=redirect)
return {u"content": data, u"result": result, u"head": self.form.HTMLHead()}
return {u"content": u"", u"result": True, u"head": self.form.HTMLHead()}
def logoutlink(self):
return {}
def logout(self):
app = self.context.app
user = self.UserName()
a = self.context.Logout(user)
app.ForgetLogin(self.request)
redirect = self.GetFormValue(u"redirect")
if not redirect:
try:
redirect = self.context.app.portal.configuration.logoutSuccessUrl
except:
redirect = self.context.app.portal.configuration.portalDefaultUrl
if redirect:
if redirect.find(u"lo=1")==-1:
if redirect.find(u"?")==-1:
redirect+=u"?lo=1"
else:
redirect+=u"&lo=1"
self.Redirect(redirect)
return {}
def logouturl(self):
try:
return self.context.app.portal.configuration.logoutUrl
except:
return self.request.url
def _render(self):
result, data, action = self.form.Process()
return {u"content": data, u"result": result, u"head": self.form.HTMLHead()}
|
nive-cms/nive
|
nive/userdb/userview/view.py
|
Python
|
gpl-3.0
| 11,106
|
# Copyright (c) 2014-2017 Cedric Bellegarde <cedric.bellegarde@adishatz.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject, Gio, GLib
from lollypop.define import Lp, Type
class TouchHelper(GObject.GObject):
"""
    Allows launching a function after a long click.
    (Workaround: GTK touch gesture support could not be made to work from Python.)
"""
def __init__(self, widget, action, shortcut):
"""
Init touch helper
@param widget as Gtk.Widget
@param action as str/None
@param shortcut as str/None
"""
GObject.GObject.__init__(self)
self.__timeout_id = None
widget.connect("button-press-event", self.__on_button_press)
widget.connect("button-release-event", self.__on_button_release)
if action is not None:
new_action = Gio.SimpleAction.new(action, None)
new_action.connect("activate", self.__on_action_activate)
Lp().add_action(new_action)
Lp().set_accels_for_action("app.%s" % action, [shortcut])
def set_short_func(self, short_func, *args):
"""
@param short func as function
@param short args
"""
self.__short_func = short_func
self.__short_args = args
def set_long_func(self, long_func, *args):
"""
@param long func as function
@param long args
"""
self.__long_func = long_func
self.__long_args = args
#######################
# PRIVATE #
#######################
def __launch_long_func(self):
"""
Launch long func
"""
self.__timeout_id = Type.NONE
self.__long_func(self.__long_args)
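        # returning None (falsy) removes the GLib timeout source, so the
        # long-press function fires at most once per press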
def __on_action_activate(self, action, param):
"""
Launch short func
@param action as Gio.SimpleAction
@param param as GLib.Variant
"""
self.__short_func(self.__short_args)
def __on_button_press(self, widget, event):
"""
Launch long func
@param widget as Gtk.Widget
@param event as Gdk.Event
"""
self.__timeout_id = GLib.timeout_add(500,
self.__launch_long_func)
return True
def __on_button_release(self, widget, event):
"""
Launch short func if needed
@param widget as Gtk.Widget
@param event as Gdk.Event
"""
# Ignore this release event, long func called
if self.__timeout_id == Type.NONE:
self.__timeout_id = None
return True
elif self.__timeout_id is not None:
GLib.source_remove(self.__timeout_id)
self.__timeout_id = None
if event is None or event.button == 1:
self.__short_func(self.__short_args)
else:
self.__long_func(self.__long_args)
return True
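# Illustrative usage (widget and callback names are hypothetical):
#   helper = TouchHelper(button, "play-pause", "<Primary>space")
#   helper.set_short_func(player.play_pause)
#   helper.set_long_func(player.stop)
# A press shorter than 500 ms triggers the short function on release;
# holding the button for longer fires the long function instead.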
|
gnumdk/lollypop
|
lollypop/touch_helper.py
|
Python
|
gpl-3.0
| 3,548
|
from genesis2.core.core import Plugin, implements
from genesis2.interfaces.gui import IMeter
class BaseMeter (Plugin):
"""
Meters are determinable values (int, float, bool) representing system status
(sysload, memory usage, service status, etc.) which are used and exported
over HTTP by ``health`` builtin plugin.
- ``name`` - `str`, meter name
- ``text`` - `str`, text shown on the specific meter widget
- ``category`` - `str`, meter category name
- ``type`` - `str`, one of 'binary', 'linear', 'decimal'
- ``transform`` - `str`, value->text transform applied to meter. One of
'float', 'fsize', 'percent', 'fsize_percent', 'yesno', 'onoff', 'running'
"""
implements(IMeter)
abstract = True
multi_instance = True
name = 'Unknown'
text = ''
category = ''
type = None
transform = None
def prepare(self, variant=None):
self = self.__class__(self.app)
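        # build a fresh instance of this plugin class so that each variant
        # gets its own independent meter object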
self.variant = variant
self.init()
return self
def init(self):
"""
Implementation may perform preparations based on ``self.variant`` here.
"""
def get_variants(self):
"""
Implementation should return list of meter 'variants' here.
"""
return ['None']
def format_value(self):
return None
class BinaryMeter (BaseMeter):
"""
Base class for binary value meters
"""
abstract = True
type = 'binary'
def get_value(self):
"""
Implementation should return binary meter value
"""
def format_value(self):
BaseMeter.format_value(self)
return {'value': self.get_value()}
class DecimalMeter (BaseMeter):
"""
Base class for decimal/float value meters
"""
abstract = True
type = 'decimal'
def get_value(self):
"""
Implementation should return decimal meter value
"""
def format_value(self):
BaseMeter.format_value(self)
return {'value': self.get_value()}
class LinearMeter (BaseMeter):
"""
Base class for decimal/float value meters with min/max range
"""
abstract = True
type = 'linear'
def get_value(self):
"""
Implementation should return decimal meter value
"""
return 0
def get_max(self):
"""
Implementation should return decimal meter range maximum
"""
return 0
def get_min(self):
"""
Implementation should return decimal meter range minimum
"""
return 0
def format_value(self):
BaseMeter.format_value(self)
return {
'value': self.get_value(),
'max': self.get_max(),
'min': self.get_min(),
}
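# A minimal illustrative subclass (hypothetical; not part of this module):
#   import os
#   class LoadMeter (LinearMeter):
#       name = 'Load average'
#       category = 'System'
#       transform = 'float'
#       def get_value(self):
#           return os.getloadavg()[0]
#       def get_max(self):
#           return 10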
|
kudrom/genesis2
|
genesis2/plugins/health/providers.py
|
Python
|
gpl-3.0
| 2,769
|
from GangaCore.GPIDev.Schema import *
from GangaCore.GPIDev.Lib.Tasks.common import *
from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform
from GangaCore.GPIDev.Lib.Job.Job import JobError
from GangaCore.GPIDev.Lib.Registry.JobRegistry import JobRegistrySlice, JobRegistrySliceProxy
from GangaCore.Core.exceptions import ApplicationConfigurationError
from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform
from GangaCore.GPIDev.Lib.Tasks.TaskLocalCopy import TaskLocalCopy
from GangaCore.Utility.logging import getLogger
from .ND280Unit_CSVEvtList import ND280Unit_CSVEvtList
from GangaND280.ND280Dataset.ND280Dataset import ND280LocalDataset
from GangaND280.ND280Splitter import splitCSVFile
import GangaCore.GPI as GPI
import os
logger = getLogger()
class ND280Transform_CSVEvtList(ITransform):
_schema = Schema(Version(1,0), dict(list(ITransform._schema.datadict.items()) + list({
'nbevents' : SimpleItem(defvalue=-1,doc='The number of events for each unit'),
}.items())))
_category = 'transforms'
_name = 'ND280Transform_CSVEvtList'
_exportmethods = ITransform._exportmethods + [ ]
def __init__(self):
super(ND280Transform_CSVEvtList,self).__init__()
def createUnits(self):
"""Create new units if required given the inputdata"""
# call parent for chaining
super(ND280Transform_CSVEvtList,self).createUnits()
# Look at the application schema and check if there is a csvfile variable
try:
csvfile = self.application.csvfile
except AttributeError:
logger.error('This application doesn\'t contain a csvfile variable. Use another Transform !')
return
subsets = splitCSVFile(self.application.csvfile, self.nbevents)
for s,sub in enumerate(subsets):
# check if this data is being run over by checking all the names listed
ok = False
for unit in self.units:
if unit.subpartid == s:
ok = True
if ok:
continue
# new unit required for this dataset
unit = ND280Unit_CSVEvtList()
unit.name = "Unit %d" % len(self.units)
unit.subpartid = s
unit.eventswanted = sub
unit.inputdata = self.inputdata[0]
self.addUnitToTRF( unit )
def createChainUnit( self, parent_units, use_copy_output = True ):
"""Create a chained unit using the output data from the given units"""
# check all parent units for copy_output
copy_output_ok = True
for parent in parent_units:
if not parent.copy_output:
copy_output_ok = False
# all parent units must be completed so the outputfiles are filled correctly
for parent in parent_units:
if parent.status != "completed":
return None
if not use_copy_output or not copy_output_ok:
unit = ND280Unit_CSVEvtList()
unit.inputdata = ND280LocalDataset()
for parent in parent_units:
# loop over the output files and add them to the ND280LocalDataset - THIS MIGHT NEED SOME WORK!
job = GPI.jobs(parent.active_job_ids[0])
for f in job.outputfiles:
# should check for different file types and add them as appropriate to the dataset
# self.inputdata (== TaskChainInput).include/exclude_file_mask could help with this
# This will be A LOT easier with Ganga 6.1 as you can easily map outputfiles -> inputfiles!
unit.inputdata.names.append( os.path.join( job.outputdir, f.namePattern ) )
else:
unit = ND280Unit_CSVEvtList()
unit.inputdata = ND280LocalDataset()
for parent in parent_units:
# unit needs to have completed and downloaded before we can get file list
if parent.status != "completed":
return None
# we should be OK so copy all output to the dataset
for f in parent.copy_output.files:
unit.inputdata.names.append( os.path.join( parent.copy_output.local_location, f ) )
return unit
|
ganga-devs/ganga
|
ganga/GangaND280/Tasks/ND280Transform_CSVEvtList.py
|
Python
|
gpl-3.0
| 4,112
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import codecs
import csv
import fnmatch
import inspect
import locale
import os
import openerp.sql_db as sql_db
import re
import logging
import tarfile
import tempfile
import threading
from babel.messages import extract
from collections import defaultdict
from datetime import datetime
from lxml import etree
from os.path import join
from xml.sax.saxutils import escape
import config
import misc
from misc import SKIPPED_ELEMENT_TYPES
import osutil
import openerp
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"
SKIPPED_ELEMENTS = ('script', 'style')
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
'zh_CN': 'Chinese_China',
'zh_TW': 'Chinese_Taiwan',
'cs_CZ': 'Czech_Czech Republic',
'da_DK': 'Danish_Denmark',
'nl_NL': 'Dutch_Netherlands',
'et_EE': 'Estonian_Estonia',
'fa_IR': 'Farsi_Iran',
'ph_PH': 'Filipino_Philippines',
'fi_FI': 'Finnish_Finland',
'fr_FR': 'French_France',
'fr_BE': 'French_France',
'fr_CH': 'French_France',
'fr_CA': 'French_France',
'ga': 'Scottish Gaelic',
'gl_ES': 'Galician_Spain',
'ka_GE': 'Georgian_Georgia',
'de_DE': 'German_Germany',
'el_GR': 'Greek_Greece',
'gu': 'Gujarati_India',
'he_IL': 'Hebrew_Israel',
'hi_IN': 'Hindi',
'hu': 'Hungarian_Hungary',
'is_IS': 'Icelandic_Iceland',
'id_ID': 'Indonesian_indonesia',
'it_IT': 'Italian_Italy',
'ja_JP': 'Japanese_Japan',
'kn_IN': 'Kannada',
'km_KH': 'Khmer',
'ko_KR': 'Korean_Korea',
'lo_LA': 'Lao_Laos',
'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India',
'mi_NZ': 'Maori',
'mn': 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway',
'nn_NO': 'Norwegian-Nynorsk_Norway',
'pl': 'Polish_Poland',
'pt_PT': 'Portuguese_Portugal',
'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia',
#should find more specific locales for spanish countries,
#but better than nothing
'es_AR': 'Spanish_Spain',
'es_BO': 'Spanish_Spain',
'es_CL': 'Spanish_Spain',
'es_CO': 'Spanish_Spain',
'es_CR': 'Spanish_Spain',
'es_DO': 'Spanish_Spain',
'es_EC': 'Spanish_Spain',
'es_ES': 'Spanish_Spain',
'es_GT': 'Spanish_Spain',
'es_HN': 'Spanish_Spain',
'es_MX': 'Spanish_Spain',
'es_NI': 'Spanish_Spain',
'es_PA': 'Spanish_Spain',
'es_PE': 'Spanish_Spain',
'es_PR': 'Spanish_Spain',
'es_PY': 'Spanish_Spain',
'es_SV': 'Spanish_Spain',
'es_UY': 'Spanish_Spain',
'es_VE': 'Spanish_Spain',
'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand',
'tr_TR': 'Turkish_Turkey',
'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam',
'tlh_TLH': 'Klingon',
}
# These are not all english small words, just those that could potentially be isolated within views
ENGLISH_SMALL_WORDS = set("as at by do go if in me no of ok on or to up us we".split())
class UNIX_LINE_TERMINATOR(csv.excel):
lineterminator = '\n'
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
#
# Helper functions for translating fields
#
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
# which elements are translated inline
TRANSLATED_ELEMENTS = {
'abbr', 'b', 'bdi', 'bdo', 'br', 'cite', 'code', 'data', 'del', 'dfn', 'em',
'font', 'i', 'ins', 'kbd', 'keygen', 'mark', 'math', 'meter', 'output',
'progress', 'q', 'ruby', 's', 'samp', 'small', 'span', 'strong', 'sub',
'sup', 'time', 'u', 'var', 'wbr', 'text',
}
# which attributes must be translated
TRANSLATED_ATTRS = {
'string', 'help', 'sum', 'avg', 'confirm', 'placeholder', 'alt', 'title',
}
avoid_pattern = re.compile(r"[\s\n]*<!DOCTYPE", re.IGNORECASE)
class XMLTranslator(object):
""" A sequence of serialized XML/HTML items, with some of them to translate
(todo) and others already translated (done). The purpose of this object
is to simplify the handling of phrasing elements (like <b>) that must be
translated together with their surrounding text.
For instance, the content of the "div" element below will be translated
as a whole (without surrounding spaces):
<div>
Lorem ipsum dolor sit amet, consectetur adipiscing elit,
<b>sed</b> do eiusmod tempor incididunt ut labore et dolore
magna aliqua. <span class="more">Ut enim ad minim veniam,
<em>quis nostrud exercitation</em> ullamco laboris nisi ut
aliquip ex ea commodo consequat.</span>
</div>
"""
def __init__(self, callback, method, parser=None):
self.callback = callback # callback function to translate terms
self.method = method # serialization method ('xml' or 'html')
self.parser = parser # parser for validating translations
self._done = [] # translated strings
self._todo = [] # todo strings that come after _done
self.needs_trans = False # whether todo needs translation
def todo(self, text, needs_trans=True):
self._todo.append(text)
if needs_trans and text.strip():
self.needs_trans = True
def all_todo(self):
return not self._done
def get_todo(self):
return "".join(self._todo)
def flush(self):
if self._todo:
todo = "".join(self._todo)
done = self.process_text(todo) if self.needs_trans else todo
self._done.append(done)
del self._todo[:]
self.needs_trans = False
def done(self, text):
self.flush()
self._done.append(text)
def get_done(self):
""" Complete the translations and return the result. """
self.flush()
return "".join(self._done)
def process_text(self, text):
""" Translate text.strip(), but keep the surrounding spaces from text. """
term = text.strip()
trans = term and self.callback(term)
if trans:
try:
# parse the translation to validate it
etree.fromstring("<div>%s</div>" % encode(trans), parser=self.parser)
except etree.ParseError:
# fallback: escape the translation
trans = escape(trans)
text = text.replace(term, trans)
return text
def process_attr(self, attr):
""" Translate the given node attribute value. """
term = attr.strip()
trans = term and self.callback(term)
return attr.replace(term, trans) if trans else attr
def process(self, node):
""" Process the given xml `node`: collect `todo` and `done` items. """
if (
isinstance(node, SKIPPED_ELEMENT_TYPES) or
node.tag in SKIPPED_ELEMENTS or
node.get("t-translation", "").strip() == "off" or
node.tag == "attribute" and node.get("name") not in TRANSLATED_ATTRS
):
# do not translate the contents of the node
tail, node.tail = node.tail, None
self.done(etree.tostring(node, method=self.method))
self.todo(escape(tail or ""))
return
# process children nodes locally in child_trans
child_trans = XMLTranslator(self.callback, self.method, parser=self.parser)
if node.text:
if avoid_pattern.match(node.text):
child_trans.done(escape(node.text)) # do not translate <!DOCTYPE...
else:
child_trans.todo(escape(node.text))
for child in node:
child_trans.process(child)
if (child_trans.all_todo() and
node.tag in TRANSLATED_ELEMENTS and
not any(attr.startswith("t-") for attr in node.attrib)):
# serialize the node element as todo
self.todo(self.serialize(node.tag, node.attrib, child_trans.get_todo()),
child_trans.needs_trans)
else:
# complete translations and serialize result as done
for attr in TRANSLATED_ATTRS:
if node.get(attr):
node.set(attr, self.process_attr(node.get(attr)))
self.done(self.serialize(node.tag, node.attrib, child_trans.get_done()))
# add node tail as todo
self.todo(escape(node.tail or ""))
def serialize(self, tag, attrib, content):
""" Return a serialized element with the given `tag`, attributes
`attrib`, and already-serialized `content`.
"""
if content:
elem = etree.tostring(etree.Element(tag, attrib), method='xml')
assert elem.endswith("/>")
return "%s>%s</%s>" % (elem[:-2], content, tag)
else:
return etree.tostring(etree.Element(tag, attrib), method=self.method)
def xml_translate(callback, value):
""" Translate an XML value (string), using `callback` for translating text
appearing in `value`.
"""
if not value:
return value
trans = XMLTranslator(callback, 'xml')
try:
root = etree.fromstring(encode(value))
trans.process(root)
return trans.get_done()
except etree.ParseError:
# fallback for translated terms: use an HTML parser and wrap the term
wrapped = "<div>%s</div>" % encode(value)
root = etree.fromstring(wrapped, etree.HTMLParser(encoding='utf-8'))
trans.process(root[0][0]) # html > body > div
return trans.get_done()[5:-6] # remove tags <div> and </div>
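# Illustrative behaviour (hypothetical callback, shown for clarity only):
#   >>> xml_translate(lambda term: term.upper(), '<div>Hello <b>world</b>!</div>')
#   '<div>HELLO <B>WORLD</B>!</div>'
# The phrasing element <b> is translated together with its surrounding text,
# as a single term, per the XMLTranslator docstring above.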
def html_translate(callback, value):
""" Translate an HTML value (string), using `callback` for translating text
appearing in `value`.
"""
if not value:
return value
parser = etree.HTMLParser(encoding='utf-8')
trans = XMLTranslator(callback, 'html', parser)
wrapped = "<div>%s</div>" % encode(value)
root = etree.fromstring(wrapped, parser)
trans.process(root[0][0]) # html > body > div
return trans.get_done()[5:-6] # remove tags <div> and </div>
#
# Warning: better use self.pool.get('ir.translation')._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
if source and name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s and src=%s and md5(src)=md5(%s)', (lang, source_type, str(name), source, source))
elif name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s', (lang, source_type, str(name)))
elif source:
cr.execute('select value from ir_translation where lang=%s and type=%s and src=%s and md5(src)=md5(%s)', (lang, source_type, source, source))
res_trans = cr.fetchone()
res = res_trans and res_trans[0] or False
return res
class GettextAlias(object):
def _get_db(self):
# find current DB based on thread/worker db name (see netsvc)
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
return sql_db.db_connect(db_name)
def _get_cr(self, frame, allow_create=True):
# try, in order: cr, cursor, self.env.cr, self.cr,
# request.env.cr
if 'cr' in frame.f_locals:
return frame.f_locals['cr'], False
if 'cursor' in frame.f_locals:
return frame.f_locals['cursor'], False
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
return s.env.cr, False
if hasattr(s, 'cr'):
return s.cr, False
try:
from openerp.http import request
return request.env.cr, False
except RuntimeError:
pass
if allow_create:
# create a new cursor
db = self._get_db()
if db is not None:
return db.cursor(), True
return None, False
def _get_uid(self, frame):
# try, in order: uid, user, self.env.uid
if 'uid' in frame.f_locals:
return frame.f_locals['uid']
if 'user' in frame.f_locals:
return int(frame.f_locals['user']) # user may be a record
s = frame.f_locals.get('self')
return s.env.uid
def _get_lang(self, frame):
# try, in order: context.get('lang'), kwargs['context'].get('lang'),
# self.env.lang, self.localcontext.get('lang'), request.env.lang
lang = None
if frame.f_locals.get('context'):
lang = frame.f_locals['context'].get('lang')
if not lang:
kwargs = frame.f_locals.get('kwargs', {})
if kwargs.get('context'):
lang = kwargs['context'].get('lang')
if not lang:
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
lang = s.env.lang
if not lang:
if hasattr(s, 'localcontext'):
lang = s.localcontext.get('lang')
if not lang:
try:
from openerp.http import request
lang = request.env.lang
except RuntimeError:
pass
if not lang:
# Last resort: attempt to guess the language of the user
# Pitfall: some operations are performed in sudo mode, and we
                # don't know the original uid, so the language may
# be wrong when the admin language differs.
pool = getattr(s, 'pool', None)
(cr, dummy) = self._get_cr(frame, allow_create=False)
uid = self._get_uid(frame)
if pool and cr and uid:
lang = pool['res.users'].context_get(cr, uid)['lang']
return lang
def __call__(self, source):
res = source
cr = None
is_new_cr = False
try:
frame = inspect.currentframe()
if frame is None:
return source
frame = frame.f_back
if not frame:
return source
lang = self._get_lang(frame)
if lang:
cr, is_new_cr = self._get_cr(frame)
if cr:
# Try to use ir.translation to benefit from global cache if possible
registry = openerp.registry(cr.dbname)
res = registry['ir.translation']._get_source(cr, SUPERUSER_ID, None, ('code','sql_constraint'), lang, source)
else:
_logger.debug('no context cursor detected, skipping translation for "%r"', source)
else:
_logger.debug('no translation language detected, skipping translation for "%r" ', source)
except Exception:
_logger.debug('translation went wrong for "%r", skipped', source)
            # when this happens, double-check the root/base translation filenames
finally:
if cr and is_new_cr:
cr.close()
return res
_ = GettextAlias()
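# Usage sketch: server-side code marks strings for translation with
#   _('Invalid action!')
# GettextAlias inspects the caller's frame for a cursor and a language, then
# resolves the term through ir.translation, falling back to the source text.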
def quote(s):
"""Returns quoted PO term string, with special PO characters escaped"""
assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
return '"%s"' % s.replace('\\','\\\\') \
.replace('"','\\"') \
.replace('\n', '\\n"\n"')
re_escaped_char = re.compile(r"(\\.)")
re_escaped_replacements = {'n': '\n', }
def _sub_replacement(match_obj):
return re_escaped_replacements.get(match_obj.group(1)[1], match_obj.group(1)[1])
def unquote(str):
"""Returns unquoted PO term string, with special PO characters unescaped"""
return re_escaped_char.sub(_sub_replacement, str[1:-1])
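# Round-trip sketch for the PO quoting helpers above (illustrative values):
#
#   quote('a\nb')        # -> '"a\\n"\n"b"' (escaped and split on newlines)
#   unquote('"a\\nb"')   # -> 'a\nb'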
# class to handle po files
class TinyPoFile(object):
def __init__(self, buffer):
self.buffer = buffer
def warn(self, msg, *args):
_logger.warning(msg, *args)
def __iter__(self):
self.buffer.seek(0)
self.lines = self._get_lines()
self.lines_count = len(self.lines)
self.first = True
self.extra_lines= []
return self
def _get_lines(self):
lines = self.buffer.readlines()
# remove the BOM (Byte Order Mark):
if len(lines):
            lines[0] = unicode(lines[0], 'utf8').lstrip(unicode(codecs.BOM_UTF8, "utf8"))
lines.append('') # ensure that the file ends with at least an empty line
return lines
def cur_line(self):
return self.lines_count - len(self.lines)
def next(self):
trans_type = name = res_id = source = trad = None
if self.extra_lines:
trans_type, name, res_id, source, trad, comments = self.extra_lines.pop(0)
if not res_id:
res_id = '0'
else:
comments = []
targets = []
line = None
fuzzy = False
while not line:
if 0 == len(self.lines):
raise StopIteration()
line = self.lines.pop(0).strip()
while line.startswith('#'):
if line.startswith('#~ '):
break
if line.startswith('#.'):
line = line[2:].strip()
if not line.startswith('module:'):
comments.append(line)
elif line.startswith('#:'):
# Process the `reference` comments. Each line can specify
# multiple targets (e.g. model, view, code, selection,
# ...). For each target, we will return an additional
# entry.
for lpart in line[2:].strip().split(' '):
trans_info = lpart.strip().split(':',2)
if trans_info and len(trans_info) == 2:
                            # the translation trans_type is missing; that is expected,
                            # since these typed references are not part of the GetText
                            # standard. Default to 'code'.
trans_info[:0] = ['code']
if trans_info and len(trans_info) == 3:
# this is a ref line holding the destination info (model, field, record)
targets.append(trans_info)
elif line.startswith('#,') and (line[2:].strip() == 'fuzzy'):
fuzzy = True
line = self.lines.pop(0).strip()
if not self.lines:
raise StopIteration()
while not line:
# allow empty lines between comments and msgid
line = self.lines.pop(0).strip()
if line.startswith('#~ '):
while line.startswith('#~ ') or not line.strip():
if 0 == len(self.lines):
raise StopIteration()
line = self.lines.pop(0)
            # this is an obsolete ('#~') entry, don't return anything
return self.next()
if not line.startswith('msgid'):
raise Exception("malformed file: bad line: %s" % line)
source = unquote(line[6:])
line = self.lines.pop(0).strip()
if not source and self.first:
self.first = False
            # if the source is "" and it's the first msgid, it's the special
            # msgstr with the information about the translation and the
            # translator; we skip it
self.extra_lines = []
while line:
line = self.lines.pop(0).strip()
return self.next()
while not line.startswith('msgstr'):
if not line:
raise Exception('malformed file at %d'% self.cur_line())
source += unquote(line)
line = self.lines.pop(0).strip()
trad = unquote(line[7:])
line = self.lines.pop(0).strip()
while line:
trad += unquote(line)
line = self.lines.pop(0).strip()
if targets and not fuzzy:
# Use the first target for the current entry (returned at the
# end of this next() call), and keep the others to generate
# additional entries (returned the next next() calls).
trans_type, name, res_id = targets.pop(0)
for t, n, r in targets:
if t == trans_type == 'code': continue
self.extra_lines.append((t, n, r, source, trad, comments))
if name is None:
if not fuzzy:
                self.warn('Missing "#:" formatted comment at line %d for the following source:\n\t%s',
self.cur_line(), source[:30])
return self.next()
return trans_type, name, res_id, source, trad, '\n'.join(comments)
def write_infos(self, modules):
import openerp.release as release
self.buffer.write("# Translation of %(project)s.\n" \
"# This file contains the translation of the following modules:\n" \
"%(modules)s" \
"#\n" \
"msgid \"\"\n" \
"msgstr \"\"\n" \
'''"Project-Id-Version: %(project)s %(version)s\\n"\n''' \
'''"Report-Msgid-Bugs-To: \\n"\n''' \
'''"POT-Creation-Date: %(now)s\\n"\n''' \
'''"PO-Revision-Date: %(now)s\\n"\n''' \
'''"Last-Translator: <>\\n"\n''' \
'''"Language-Team: \\n"\n''' \
'''"MIME-Version: 1.0\\n"\n''' \
'''"Content-Type: text/plain; charset=UTF-8\\n"\n''' \
'''"Content-Transfer-Encoding: \\n"\n''' \
'''"Plural-Forms: \\n"\n''' \
"\n"
% { 'project': release.description,
'version': release.version,
'modules': reduce(lambda s, m: s + "#\t* %s\n" % m, modules, ""),
'now': datetime.utcnow().strftime('%Y-%m-%d %H:%M')+"+0000",
}
)
def write(self, modules, tnrs, source, trad, comments=None):
        plural = len(modules) > 1 and 's' or ''
        self.buffer.write("#. module%s: %s\n" % (plural, ', '.join(modules)))
if comments:
self.buffer.write(''.join(('#. %s\n' % c for c in comments)))
code = False
for typy, name, res_id in tnrs:
self.buffer.write("#: %s:%s:%s\n" % (typy, name, res_id))
if typy == 'code':
code = True
if code:
            # only strings in python code are python-formatted
self.buffer.write("#, python-format\n")
if not isinstance(trad, unicode):
trad = unicode(trad, 'utf8')
if not isinstance(source, unicode):
source = unicode(source, 'utf8')
msg = "msgid %s\n" \
"msgstr %s\n\n" \
% (quote(source), quote(trad))
self.buffer.write(msg.encode('utf8'))
# Methods to export the translation file
def trans_export(lang, modules, buffer, format, cr):
def _process(format, modules, rows, buffer, lang):
if format == 'csv':
writer = csv.writer(buffer, 'UNIX')
# write header first
writer.writerow(("module","type","name","res_id","src","value","comments"))
for module, type, name, res_id, src, trad, comments in rows:
comments = '\n'.join(comments)
writer.writerow((module, type, name, res_id, src, trad, comments))
elif format == 'po':
writer = TinyPoFile(buffer)
writer.write_infos(modules)
# we now group the translations by source. That means one translation per source.
grouped_rows = {}
for module, type, name, res_id, src, trad, comments in rows:
row = grouped_rows.setdefault(src, {})
row.setdefault('modules', set()).add(module)
if not row.get('translation') and trad != src:
row['translation'] = trad
row.setdefault('tnrs', []).append((type, name, res_id))
row.setdefault('comments', set()).update(comments)
for src, row in sorted(grouped_rows.items()):
if not lang:
# translation template, so no translation value
row['translation'] = ''
elif not row.get('translation'):
row['translation'] = src
writer.write(row['modules'], row['tnrs'], src, row['translation'], row['comments'])
elif format == 'tgz':
rows_by_module = {}
for row in rows:
module = row[0]
rows_by_module.setdefault(module, []).append(row)
tmpdir = tempfile.mkdtemp()
for mod, modrows in rows_by_module.items():
tmpmoddir = join(tmpdir, mod, 'i18n')
os.makedirs(tmpmoddir)
pofilename = (lang if lang else mod) + ".po" + ('t' if not lang else '')
buf = file(join(tmpmoddir, pofilename), 'w')
_process('po', [mod], modrows, buf, lang)
buf.close()
tar = tarfile.open(fileobj=buffer, mode='w|gz')
tar.add(tmpdir, '')
tar.close()
else:
raise Exception(_('Unrecognized extension: must be one of '
'.csv, .po, or .tgz (received .%s).') % format)
translations = trans_generate(lang, modules, cr)
modules = set(t[0] for t in translations)
_process(format, modules, translations, buffer, lang)
del translations
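# Illustrative call (hypothetical cursor and buffer): export the French terms
# of the 'sale' module as a PO file:
#
#   buf = cStringIO.StringIO()
#   trans_export('fr_FR', ['sale'], buf, 'po', cr)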
def trans_parse_rml(de):
res = []
for n in de:
for m in n:
if isinstance(m, SKIPPED_ELEMENT_TYPES) or not m.text:
continue
            string_list = [s.replace('\n', ' ').strip() for s in re.split(r'\[\[.+?\]\]', m.text)]
for s in string_list:
if s:
res.append(s.encode("utf8"))
res.extend(trans_parse_rml(n))
return res
def _push(callback, term, source_line):
""" Sanity check before pushing translation terms """
term = (term or "").strip().encode('utf8')
# Avoid non-char tokens like ':' '...' '.00' etc.
if len(term) > 8 or any(x.isalpha() for x in term):
callback(term, source_line)
# tests whether an object is in a list of modules
def in_modules(object_name, modules):
if 'all' in modules:
return True
module_dict = {
'ir': 'base',
'res': 'base',
'workflow': 'base',
}
module = object_name.split('.')[0]
module = module_dict.get(module, module)
return module in modules
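# Quick examples of the mapping above (illustrative):
#   in_modules('res.partner', ['base'])  -> True   ('res' maps to 'base')
#   in_modules('sale.order', ['base'])   -> False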
def _extract_translatable_qweb_terms(element, callback):
""" Helper method to walk an etree document representing
a QWeb template, and call ``callback(term)`` for each
translatable term that is found in the document.
:param etree._Element element: root of etree document to extract terms from
:param Callable callback: a callable in the form ``f(term, source_line)``,
that will be called for each extracted term.
"""
# not using elementTree.iterparse because we need to skip sub-trees in case
# the ancestor element had a reason to be skipped
for el in element:
if isinstance(el, SKIPPED_ELEMENT_TYPES): continue
if (el.tag.lower() not in SKIPPED_ELEMENTS
and "t-js" not in el.attrib
and not ("t-jquery" in el.attrib and "t-operation" not in el.attrib)
and el.get("t-translation", '').strip() != "off"):
_push(callback, el.text, el.sourceline)
for att in ('title', 'alt', 'label', 'placeholder'):
if att in el.attrib:
_push(callback, el.attrib[att], el.sourceline)
_extract_translatable_qweb_terms(el, callback)
_push(callback, el.tail, el.sourceline)
def babel_extract_qweb(fileobj, keywords, comment_tags, options):
"""Babel message extractor for qweb template files.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should
be recognized as translation functions
:param comment_tags: a list of translator tags to search for and
include in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
:rtype: Iterable
"""
result = []
def handle_text(text, lineno):
result.append((lineno, None, text, []))
tree = etree.parse(fileobj)
_extract_translatable_qweb_terms(tree.getroot(), handle_text)
return result
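# Illustrative direct call (hypothetical template file); the extractor yields
# (lineno, funcname, message, comments) tuples as documented above:
#
#   with open('template.xml', 'rb') as f:
#       for lineno, funcname, message, comments in babel_extract_qweb(
#               f, keywords=[], comment_tags=[], options={}):
#           print lineno, message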
def trans_generate(lang, modules, cr):
dbname = cr.dbname
registry = openerp.registry(dbname)
trans_obj = registry['ir.translation']
model_data_obj = registry['ir.model.data']
uid = 1
query = 'SELECT name, model, res_id, module' \
' FROM ir_model_data'
query_models = """SELECT m.id, m.model, imd.module
FROM ir_model AS m, ir_model_data AS imd
WHERE m.id = imd.res_id AND imd.model = 'ir.model' """
if 'all_installed' in modules:
query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
query_param = None
if 'all' not in modules:
query += ' WHERE module IN %s'
query_models += ' AND imd.module in %s'
query_param = (tuple(modules),)
else:
query += ' WHERE module != %s'
query_models += ' AND imd.module != %s'
query_param = ('__export__',)
query += ' ORDER BY module, model, name'
query_models += ' ORDER BY module, model'
cr.execute(query, query_param)
_to_translate = set()
def push_translation(module, type, name, id, source, comments=None):
# empty and one-letter terms are ignored, they probably are not meant to be
# translated, and would be very hard to translate anyway.
sanitized_term = (source or '').strip()
try:
# verify the minimal size without eventual xml tags
# wrap to make sure html content like '<a>b</a><c>d</c>' is accepted by lxml
wrapped = "<div>%s</div>" % sanitized_term
node = etree.fromstring(wrapped)
sanitized_term = etree.tostring(node, encoding='UTF-8', method='text')
except etree.ParseError:
pass
# remove non-alphanumeric chars
sanitized_term = re.sub(r'\W+', '', sanitized_term)
if not sanitized_term or len(sanitized_term) <= 1:
return
tnx = (module, source, name, id, type, tuple(comments or ()))
_to_translate.add(tnx)
def push(mod, type, name, res_id, term):
term = (term or '').strip()
if len(term) > 2 or term in ENGLISH_SMALL_WORDS:
push_translation(mod, type, name, res_id, term)
def get_root_view(xml_id):
view = model_data_obj.xmlid_to_object(cr, uid, xml_id)
if view:
while view.mode != 'primary':
view = view.inherit_id
xml_id = view.get_external_id(cr, uid).get(view.id, xml_id)
return xml_id
for (xml_name,model,res_id,module) in cr.fetchall():
module = encode(module)
model = encode(model)
xml_name = "%s.%s" % (module, encode(xml_name))
if model not in registry:
_logger.error("Unable to find object %r", model)
continue
Model = registry[model]
if not Model._translate:
# explicitly disabled
continue
obj = Model.browse(cr, uid, res_id)
if not obj.exists():
_logger.warning("Unable to find object %r with id %d", model, res_id)
continue
if model=='ir.model.fields':
try:
field_name = encode(obj.name)
except AttributeError, exc:
_logger.error("name error in %s: %s", xml_name, str(exc))
continue
field_model = registry.get(obj.model)
if (field_model is None or not field_model._translate or
field_name not in field_model._fields):
continue
field_def = field_model._fields[field_name]
if hasattr(field_def, 'selection') and isinstance(field_def.selection, (list, tuple)):
name = "%s,%s" % (encode(obj.model), field_name)
for dummy, val in field_def.selection:
push_translation(module, 'selection', name, 0, encode(val))
elif model=='ir.actions.report.xml':
name = encode(obj.report_name)
fname = ""
if obj.report_rml:
fname = obj.report_rml
parse_func = trans_parse_rml
report_type = "report"
elif obj.report_xsl:
continue
if fname and obj.report_type in ('pdf', 'xsl'):
try:
report_file = misc.file_open(fname)
try:
d = etree.parse(report_file)
for t in parse_func(d.iter()):
push_translation(module, report_type, name, 0, t)
finally:
report_file.close()
except (IOError, etree.XMLSyntaxError):
_logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
for field_name, field_def in obj._fields.iteritems():
if getattr(field_def, 'translate', None):
name = model + "," + field_name
try:
value = obj[field_name] or ''
except Exception:
continue
for term in set(field_def.get_trans_terms(value)):
push_translation(module, 'model', name, xml_name, encode(term))
# End of data for ir.model.data query results
cr.execute(query_models, query_param)
def push_constraint_msg(module, term_type, model, msg):
if not hasattr(msg, '__call__'):
push_translation(encode(module), term_type, encode(model), 0, encode(msg))
def push_local_constraints(module, model, cons_type='sql_constraints'):
"""Climb up the class hierarchy and ignore inherited constraints
from other modules"""
term_type = 'sql_constraint' if cons_type == 'sql_constraints' else 'constraint'
msg_pos = 2 if cons_type == 'sql_constraints' else 1
for cls in model.__class__.__mro__:
if getattr(cls, '_module', None) != module:
continue
constraints = getattr(cls, '_local_' + cons_type, [])
for constraint in constraints:
push_constraint_msg(module, term_type, model._name, constraint[msg_pos])
for (_, model, module) in cr.fetchall():
if model not in registry:
_logger.error("Unable to find object %r", model)
continue
model_obj = registry[model]
if model_obj._constraints:
push_local_constraints(module, model_obj, 'constraints')
if model_obj._sql_constraints:
push_local_constraints(module, model_obj, 'sql_constraints')
installed_modules = map(
lambda m: m['name'],
registry['ir.module.module'].search_read(cr, uid, [('state', '=', 'installed')], fields=['name']))
path_list = [(path, True) for path in openerp.modules.module.ad_paths]
# Also scan these non-addon paths
for bin_path in ['osv', 'report', 'modules', 'service', 'tools']:
path_list.append((os.path.join(config.config['root_path'], bin_path), True))
# non-recursive scan for individual files in root directory but without
# scanning subdirectories that may contain addons
path_list.append((config.config['root_path'], False))
_logger.debug("Scanning modules at paths: %s", path_list)
def get_module_from_path(path):
for (mp, rec) in path_list:
if rec and path.startswith(mp) and os.path.dirname(path) != mp:
path = path[len(mp)+1:]
return path.split(os.path.sep)[0]
return 'base' # files that are not in a module are considered as being in 'base' module
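    # e.g. (illustrative) with path_list containing ('/opt/odoo/addons', True),
    # the file '/opt/odoo/addons/sale/models/sale.py' maps to module 'sale'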
def verified_module_filepaths(fname, path, root):
fabsolutepath = join(root, fname)
frelativepath = fabsolutepath[len(path):]
display_path = "addons%s" % frelativepath
module = get_module_from_path(fabsolutepath)
if ('all' in modules or module in modules) and module in installed_modules:
if os.path.sep != '/':
display_path = display_path.replace(os.path.sep, '/')
return module, fabsolutepath, frelativepath, display_path
return None, None, None, None
def babel_extract_terms(fname, path, root, extract_method="python", trans_type='code',
extra_comments=None, extract_keywords={'_': None}):
module, fabsolutepath, _, display_path = verified_module_filepaths(fname, path, root)
extra_comments = extra_comments or []
if not module: return
src_file = open(fabsolutepath, 'r')
try:
for extracted in extract.extract(extract_method, src_file,
keywords=extract_keywords):
# Babel 0.9.6 yields lineno, message, comments
# Babel 1.3 yields lineno, message, comments, context
lineno, message, comments = extracted[:3]
push_translation(module, trans_type, display_path, lineno,
encode(message), comments + extra_comments)
except Exception:
_logger.exception("Failed to extract terms from %s", fabsolutepath)
finally:
src_file.close()
for (path, recursive) in path_list:
_logger.debug("Scanning files of modules at %s", path)
for root, dummy, files in osutil.walksymlinks(path):
for fname in fnmatch.filter(files, '*.py'):
babel_extract_terms(fname, path, root)
# mako provides a babel extractor: http://docs.makotemplates.org/en/latest/usage.html#babel
for fname in fnmatch.filter(files, '*.mako'):
babel_extract_terms(fname, path, root, 'mako', trans_type='report')
# Javascript source files in the static/src/js directory, rest is ignored (libs)
if fnmatch.fnmatch(root, '*/static/src/js*'):
for fname in fnmatch.filter(files, '*.js'):
babel_extract_terms(fname, path, root, 'javascript',
extra_comments=[WEB_TRANSLATION_COMMENT],
extract_keywords={'_t': None, '_lt': None})
# QWeb template files
if fnmatch.fnmatch(root, '*/static/src/xml*'):
for fname in fnmatch.filter(files, '*.xml'):
babel_extract_terms(fname, path, root, 'openerp.tools.translate:babel_extract_qweb',
extra_comments=[WEB_TRANSLATION_COMMENT])
if not recursive:
            # the walk is top-down, so the first iteration covers only the top level
break
out = []
# translate strings marked as to be translated
for module, source, name, id, type, comments in sorted(_to_translate):
trans = '' if not lang else trans_obj._get_source(cr, uid, name, type, lang, source)
out.append((module, type, name, id, source, encode(trans) or '', comments))
return out
def trans_load(cr, filename, lang, verbose=True, module_name=None, context=None):
try:
fileobj = misc.file_open(filename)
_logger.info("loading %s", filename)
fileformat = os.path.splitext(filename)[-1][1:].lower()
result = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, module_name=module_name, context=context)
fileobj.close()
return result
except IOError:
if verbose:
_logger.error("couldn't read translation file %s", filename)
return None
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None):
"""Populates the ir_translation table."""
if verbose:
_logger.info('loading translation file for language %s', lang)
if context is None:
context = {}
db_name = cr.dbname
registry = openerp.registry(db_name)
lang_obj = registry.get('res.lang')
trans_obj = registry.get('ir.translation')
iso_lang = misc.get_iso_codes(lang)
try:
ids = lang_obj.search(cr, SUPERUSER_ID, [('code','=', lang)])
if not ids:
# lets create the language with locale information
lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name)
# Parse also the POT: it will possibly provide additional targets.
# (Because the POT comments are correct on Launchpad but not the
# PO comments due to a Launchpad limitation. See LP bug 933496.)
pot_reader = []
# now, the serious things: we read the language file
fileobj.seek(0)
if fileformat == 'csv':
reader = csv.reader(fileobj, quotechar='"', delimiter=',')
# read the first line of the file (it contains columns titles)
for row in reader:
fields = row
break
elif fileformat == 'po':
reader = TinyPoFile(fileobj)
fields = ['type', 'name', 'res_id', 'src', 'value', 'comments']
# Make a reader for the POT file and be somewhat defensive for the
# stable branch.
if fileobj.name.endswith('.po'):
try:
# Normally the path looks like /path/to/xxx/i18n/lang.po
# and we try to find the corresponding
# /path/to/xxx/i18n/xxx.pot file.
# (Sometimes we have 'i18n_extra' instead of just 'i18n')
addons_module_i18n, _ignored = os.path.split(fileobj.name)
addons_module, i18n_dir = os.path.split(addons_module_i18n)
addons, module = os.path.split(addons_module)
pot_handle = misc.file_open(os.path.join(
addons, module, i18n_dir, module + '.pot'))
pot_reader = TinyPoFile(pot_handle)
except:
pass
else:
_logger.info('Bad file format: %s', fileformat)
raise Exception(_('Bad file format: %s') % fileformat)
# Read the POT references, and keep them indexed by source string.
class Target(object):
def __init__(self):
self.value = None
self.targets = set() # set of (type, name, res_id)
self.comments = None
pot_targets = defaultdict(Target)
for type, name, res_id, src, _ignored, comments in pot_reader:
if type is not None:
target = pot_targets[src]
target.targets.add((type, name, res_id))
target.comments = comments
# read the rest of the file
irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)
def process_row(row):
"""Process a single PO (or POT) entry."""
# dictionary which holds values for this line of the csv file
# {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
# 'src': ..., 'value': ..., 'module':...}
dic = dict.fromkeys(('type', 'name', 'res_id', 'src', 'value',
'comments', 'imd_model', 'imd_name', 'module'))
dic['lang'] = lang
dic.update(zip(fields, row))
# discard the target from the POT targets.
src = dic['src']
if src in pot_targets:
target = pot_targets[src]
target.value = dic['value']
target.targets.discard((dic['type'], dic['name'], dic['res_id']))
        # skip terms that do not specify a res_id
res_id = dic['res_id']
if not res_id:
return
if isinstance(res_id, (int, long)) or \
(isinstance(res_id, basestring) and res_id.isdigit()):
dic['res_id'] = int(res_id)
if module_name:
dic['module'] = module_name
else:
# res_id is an xml id
dic['res_id'] = None
dic['imd_model'] = dic['name'].split(',')[0]
if '.' in res_id:
dic['module'], dic['imd_name'] = res_id.split('.', 1)
else:
dic['module'], dic['imd_name'] = module_name, res_id
irt_cursor.push(dic)
# First process the entries from the PO file (doing so also fills/removes
# the entries from the POT file).
for row in reader:
process_row(row)
# Then process the entries implied by the POT file (which is more
# correct w.r.t. the targets) if some of them remain.
pot_rows = []
for src, target in pot_targets.iteritems():
if target.value:
for type, name, res_id in target.targets:
pot_rows.append((type, name, res_id, src, target.value, target.comments))
pot_targets.clear()
for row in pot_rows:
process_row(row)
irt_cursor.finish()
trans_obj.clear_caches()
if verbose:
_logger.info("translation file loaded succesfully")
except IOError:
filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
_logger.exception("couldn't read translation file %s", filename)
def get_locales(lang=None):
if lang is None:
lang = locale.getdefaultlocale()[0]
if os.name == 'nt':
lang = _LOCALE2WIN32.get(lang, lang)
def process(enc):
ln = locale._build_localename((lang, enc))
yield ln
nln = locale.normalize(ln)
if nln != ln:
yield nln
for x in process('utf8'): yield x
prefenc = locale.getpreferredencoding()
if prefenc:
for x in process(prefenc): yield x
prefenc = {
'latin1': 'latin9',
'iso-8859-1': 'iso8859-15',
'cp1252': '1252',
}.get(prefenc.lower())
if prefenc:
for x in process(prefenc): yield x
yield lang
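# Illustrative candidate order for lang='fr_FR' (platform dependent):
#   'fr_FR.utf8', 'fr_FR.UTF-8', then preferred-encoding variants, and
#   finally the bare 'fr_FR'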
def resetlocale():
# locale.resetlocale is bugged with some locales.
for ln in get_locales():
try:
return locale.setlocale(locale.LC_ALL, ln)
except locale.Error:
continue
def load_language(cr, lang):
"""Loads a translation terms for a language.
Used mainly to automate language loading at db initialization.
:param lang: language ISO code with optional _underscore_ and l10n flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
:type lang: str
"""
registry = openerp.registry(cr.dbname)
language_installer = registry['base.language.install']
oid = language_installer.create(cr, SUPERUSER_ID, {'lang': lang})
language_installer.lang_install(cr, SUPERUSER_ID, [oid], context=None)
|
AyoubZahid/odoo
|
openerp/tools/translate.py
|
Python
|
gpl-3.0
| 48,927
|
# -*- coding: utf-8 -*-
# This file is part of Gtfslib-python.
#
# Gtfslib-python is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gtfslib-python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gtfslib-python. If not, see <http://www.gnu.org/licenses/>.
"""
@author: Laurent GRÉGOIRE <laurent.gregoire@mecatran.com>
"""
import math
import sys
import unittest
import six
from gtfsplugins.prettycsv import PrettyCsv
class TestPrettyPrinter(unittest.TestCase):
def test_prettyprinter(self):
# Capture standard output
saved_stdout = sys.stdout
try:
out = six.StringIO()
sys.stdout = out
with PrettyCsv(None, fieldnames=[ 'col1', 'col2' ], maxwidth=5) as csv:
csv.writerow({ 'col1': 1, 'col2': 2 })
csv.writerow({ 'col2': 'foobarbaz', 'col1': 11 })
csv.writerow([ 42, 'baz', 'extrawide' ])
output1 = out.getvalue().strip()
out = six.StringIO()
sys.stdout = out
with PrettyCsv(None, maxwidth=5) as csv:
csv.writerow([ 1, 2 ])
csv.writerow([ 11, 'foobarbaz', 'extrawide' ])
output2 = out.getvalue().strip()
out = six.StringIO()
sys.stdout = out
with PrettyCsv(None, fieldnames=[ 'col1', 'col2' ], maxwidth=5) as csv:
csv.writerow([ 1 ])
csv.writerow([ None, 1.42 ])
csv.writerow([ None, 1./3., math.pi ])
output3 = out.getvalue().strip()
finally:
sys.stdout = saved_stdout
self.assertEqual("+------+-------+-------+\n"+
"| col1 | col2 | |\n"+
"+------+-------+-------+\n"+
"| 1 | 2 | |\n"+
"| 11 | fooba | |\n"+
"| 42 | baz | extra |\n"+
"+------+-------+-------+", output1)
self.assertEqual("+----+-------+-------+\n"+
"| 1 | 2 | |\n"+
"| 11 | fooba | extra |\n"+
"+----+-------+-------+", output2)
self.assertEqual("+------+-------+-------+\n"+
"| col1 | col2 | |\n"+
"+------+-------+-------+\n"+
"| 1 | | |\n"+
"| | 1.42 | |\n"+
"| | 0.333 | 3.141 |\n"+
"+------+-------+-------+", output3)
if __name__ == '__main__':
unittest.main()
|
afimb/gtfslib-python
|
test/test_prettyprint.py
|
Python
|
gpl-3.0
| 3,131
|
#!/usr/bin/python
# This file is part of pulseaudio-dlna.
# pulseaudio-dlna is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pulseaudio-dlna is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pulseaudio-dlna. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import SocketServer
import logging
import socket
import struct
import setproctitle
import chardet
from pulseaudio_dlna.discover import RendererDiscover
from pulseaudio_dlna.renderers import RendererHolder
logger = logging.getLogger('pulseaudio_dlna.listener')
class SSDPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
guess = chardet.detect(self.request[0])
packet = self.request[0].decode(guess['encoding'])
lines = packet.splitlines()
if len(lines) > 0 and self._is_notify_method(lines[0]):
self.server.renderers_holder.process_notify_request(packet)
def _is_notify_method(self, method_header):
method = self._get_method(method_header)
return method == 'NOTIFY'
def _get_method(self, method_header):
return method_header.split(' ')[0]
class SSDPListener(SocketServer.UDPServer):
def __init__(
self, stream_server_address, message_queue, plugins,
device_filter=None, device_config=None, renderer_urls=None,
disable_ssdp_listener=False):
self.disable_ssdp_listener = disable_ssdp_listener
self.renderer_urls = renderer_urls
self.renderers_holder = RendererHolder(
stream_server_address, message_queue, plugins, device_filter,
device_config)
if not self.disable_ssdp_listener:
SocketServer.UDPServer.__init__(
self, ('', 1900), SSDPRequestHandler)
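            # join the SSDP multicast group (239.255.255.250, port 1900) on
            # all interfaces so renderer NOTIFY datagrams are received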
multicast = struct.pack(
"=4sl", socket.inet_aton("239.255.255.250"), socket.INADDR_ANY)
self.socket.setsockopt(
socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, multicast)
if self.renderer_urls is not None:
self.renderers_holder.add_renderers_by_url(self.renderer_urls)
else:
discover = RendererDiscover(self.renderers_holder)
discover.search()
logger.info('Discovery complete.')
def run(self):
if not self.disable_ssdp_listener:
setproctitle.setproctitle('ssdp_listener')
SocketServer.UDPServer.serve_forever(self)
class ThreadedSSDPListener(SocketServer.ThreadingMixIn, SSDPListener):
pass
|
leonhandreke/pulseaudio-dlna
|
pulseaudio_dlna/listener.py
|
Python
|
gpl-3.0
| 2,974
|
from django.test import TestCase as DjangoTestCase
from django.conf import settings
from seeder.models import *
from seeder.posters import TwitterPoster
from random import randint as random
from datetime import datetime
import time
import mox
import re
def generate_random_authorized_account():
u = User(username = "foo" + str(random(10000, 99999)))
u.save()
return AuthorizedAccount.objects.create(user = u)
def generate_random_seeder(account = None):
if account is None:
account = generate_random_authorized_account()
return Seeder.objects.create(
twitter_id = random(1000, 9999),
authorized_for = account
)
def generate_random_token(seeder = None):
if seeder is None:
seeder = generate_random_seeder()
return Token.objects.create(
seeder = seeder,
oauth_token = "some token" + str(random(10, 100)),
oauth_token_secret = "some token secret" + str(random(10, 100))
)
def generate_random_update(account = None):
if account is None:
account = generate_random_authorized_account()
return Update.objects.create(
posted_by = account,
original_text = "Hello from Seeder!"
)
def generate_mock_poster(update):
poster = mox.MockObject(TwitterPoster)
poster.post(update)
mox.Replay(poster)
return poster
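# mox follows a record/replay/verify cycle: calls made on the mock before
# mox.Replay() are recorded as expectations, and mox.Verify() (used in the
# tests below) fails unless the code under test replayed exactly those calls.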
class TestCase(DjangoTestCase):
def assertPubDateBetween(self, obj, begin, end):
self.assertTrue(obj.pub_date > begin and obj.pub_date < end)
def tearDown(self):
models = (AuthorizedAccount, Token, Seeder, Update, SeededUpdate,)
for model in models:
[obj.delete() for obj in model.objects.all()]
class TestOfSeededUpdate(TestCase):
def test_has_a_future_timestamp(self):
foo = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update()
)
self.assertTrue(datetime.now() < foo.pub_date)
def test_retrieves_updates_based_on_availability(self):
first = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update(),
pub_date = datetime.now()
)
second = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update(),
pub_date = datetime.fromtimestamp(time.time() + 1)
)
self.assertEqual(1, len(SeededUpdate.objects.currently_available()))
time.sleep(1.1)
self.assertEqual(2, len(SeededUpdate.objects.currently_available()))
    def test_retrieves_updates_that_have_not_been_sent(self):
first = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update(),
pub_date = datetime.now()
)
second = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update(),
pub_date = datetime.now()
)
self.assertEqual(2, len(SeededUpdate.objects.currently_available()))
        first.has_sent = 1
first.save()
self.assertEqual(1, len(SeededUpdate.objects.currently_available()))
def test_send_calls_on_poster(self):
update = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update()
)
poster = generate_mock_poster(update)
update.send(poster)
mox.Verify(poster)
def test_send_marks_updates_as_sent(self):
update = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update(),
pub_date = datetime.now()
)
self.assertEqual(len(SeededUpdate.objects.currently_available()), 1,
"sanity check to ensure value seeded update is present")
update.send(generate_mock_poster(update))
self.assertEqual(len(SeededUpdate.objects.currently_available()), 0,
"SeededUpdate should not be available after being sent")
class TestOfUpdate(TestCase):
def test_creates_seeded_updates_on_save(self):
# sanity check
self.assertEqual(0, len(SeededUpdate.objects.all()))
a = generate_random_authorized_account()
[generate_random_seeder(a) for i in range(10)]
update = Update.objects.create(
posted_by = a,
original_text = "Hello from Seeder!"
)
self.assertEqual(10, len(SeededUpdate.objects.all()))
def test_all_seeded_updates_have_pub_dates_between_1_and_30_minutes(self):
a = generate_random_authorized_account()
generate_random_seeder(a)
update = Update.objects.create(
posted_by = a,
original_text = "Hello from Seeder!"
)
seeded_update = SeededUpdate.objects.get(update = update)
# only uses 59 seconds to avoid possible race condition where
# more than a second elapses between creation and the time this
# test runs
begin_datetime = datetime.fromtimestamp(time.time() + 59)
end_datetime = datetime.fromtimestamp(time.time() + (60 * 30) + 1)
self.assertPubDateBetween(seeded_update, begin_datetime, end_datetime)
def test_only_creates_new_seeded_updates_on_new(self):
a = generate_random_authorized_account()
generate_random_seeder(a)
update = generate_random_update(a)
self.assertEqual(len(SeededUpdate.objects.all()), 1,
"Sanity check")
update.save()
self.assertEqual(len(SeededUpdate.objects.all()), 1,
"Should only create SeededUpdates on save when new")
def test_only_creates_for_non_expired_seeders(self):
a = generate_random_authorized_account()
s1 = generate_random_seeder(a)
s2 = generate_random_seeder(a)
s2.set_expires_on_in_days(-1)
s2.save()
update = generate_random_update(a)
self.assertEquals(len(SeededUpdate.objects.all()), 1,
"should only create one SeededUpdate since on has expired")
class TestOfAuthorizedAccount(TestCase):
def test_default_account_returns_default_account(self):
a = generate_random_authorized_account()
a.twitter_id = settings.SEEDER['default_twitter_id']
a.save()
default_account = AuthorizedAccount.objects.default_account()
self.assertEqual(settings.SEEDER['default_twitter_id'], default_account.twitter_id)
def test_only_pulls_seeders_that_have_not_expired(self):
a = generate_random_authorized_account()
s = generate_random_seeder(a)
self.assertEquals(len(a.seeder_set.currently_available()), 1,
"sanity check: seeder_set.currently_available() should be one")
s.expires_on = datetime.fromtimestamp(time.time() - 60)
s.save()
self.assertEquals(len(a.seeder_set.currently_available()), 0,
"seeder_set.currently_available() should have no seeders")
class TestOfSeeder(TestCase):
def test_automatically_expires_in_30_days(self):
seeder = generate_random_seeder()
expected_expires_on = datetime.fromtimestamp(time.time() + 60*60*24*30).date()
self.assertEquals(seeder.expires_on.date(), expected_expires_on,
"seeder.expires_on should default to 30 days")
def test_can_set_by_expires_by_day(self):
seeder = generate_random_seeder()
seeder.set_expires_on_in_days(7)
self.assertEquals(seeder.expires_on.date(), datetime.fromtimestamp(time.time() + 60*60*24*7).date(),
"seeder.expires_on should be 7 days in the future")
def test_can_take_a_string_as_parameter(self):
seeder = generate_random_seeder()
try:
seeder.set_expires_on_in_days("7")
except TypeError:
self.fail("seeder.set_expires_on_in_days() unable to handle a string")
def generate_mock_settings():
return mox.MockObject(settings)
class StubTwitterApi(object):
number_of_calls = 0
calls = []
def __init__(self, *args, **kwargs):
StubTwitterApi.number_of_calls += 1
def __getattribute__(self, method):
StubTwitterApi.calls.append(method)
return self
def __call__(self, *args, **kwargs):
last_call = StubTwitterApi.calls.pop()
StubTwitterApi.calls.append({
"name": last_call,
"args": args,
"kwargs": kwargs,
})
class SanityTestOfStubTwitterApi(TestCase):
def setUp(self):
super(SanityTestOfStubTwitterApi, self).setUp()
StubTwitterApi.number_of_calls = 0
def test_sanity_check(self):
obj1 = StubTwitterApi()
self.assertEqual(StubTwitterApi.number_of_calls, 1)
obj2 = StubTwitterApi()
self.assertEqual(StubTwitterApi.number_of_calls, 2)
obj3 = StubTwitterApi()
self.assertEqual(StubTwitterApi.number_of_calls, 3)
def test_keeps_track_of_calls(self):
obj = StubTwitterApi()
obj.foobar()
self.assertEqual(len(StubTwitterApi.calls), 1)
def test_keeps_track_of_parameters_passed_in_to_methods(self):
obj = StubTwitterApi()
number = random(10, 100)
obj.foobar(number)
data = StubTwitterApi.calls.pop()
self.assertEquals(data['args'], (number,))
def generate_full_update(number_of_seeders):
account = generate_random_authorized_account()
[generate_random_token(generate_random_seeder(account)) for i in range(number_of_seeders)]
update = generate_random_update(account)
return update
class StubSettingsForTwitterApi(object):
TWITTER = {
"CONSUMER_KEY": "foobar",
"CONSUMER_SECRET": "barfoo",
}
class TestOfTwitterPoster(TestCase):
def setUp(self):
super(TestOfTwitterPoster, self).setUp()
StubTwitterApi.number_of_calls = 0
StubTwitterApi.calls = []
def test_encapsulates_post_in_template_string(self):
settings = StubSettingsForTwitterApi()
random_prefix = "random %d" % random(10, 100)
settings.TWITTER["POST_TEMPLATE"] = "%s: %%s" % random_prefix
u = generate_full_update(1)
poster = TwitterPoster(api_class = StubTwitterApi, settings = settings)
poster.post(u.seededupdate_set.all()[0])
for data in StubTwitterApi.calls:
if data['name'] == 'PostUpdate':
break
(posted_status,) = data['args']
expected_status = "%s: .*" % random_prefix
self.assertTrue(
re.compile(expected_status).match(posted_status) is not None
)
def test_instantiates_new_api_class_for_each_token(self):
number_of_seeders = random(2, 10)
u = generate_full_update(number_of_seeders)
poster = TwitterPoster(api_class = StubTwitterApi)
[seeded_update.send(poster) for seeded_update in u.seededupdate_set.all()]
self.assertEquals(StubTwitterApi.number_of_calls, number_of_seeders)
def assertSetSourceCalledWith(self, value):
for data in StubTwitterApi.calls:
if data["name"] == "SetSource":
break
self.assertEquals((value,), data["args"])
def test_sets_source_to_seeder_if_not_configured(self):
u = generate_full_update(1)
poster = TwitterPoster(api_class = StubTwitterApi)
poster.post(u.seededupdate_set.all()[0])
self.assertSetSourceCalledWith("seeder")
def test_sets_source_to_configured_value(self):
settings = StubSettingsForTwitterApi()
random_source = "random value: " + str(random(10, 100))
settings.TWITTER["SOURCE"] = random_source
u = generate_full_update(1)
poster = TwitterPoster(api_class = StubTwitterApi, settings = settings)
poster.post(u.seededupdate_set.all()[0])
self.assertSetSourceCalledWith(random_source)
|
tswicegood/seeder
|
seeder/tests.py
|
Python
|
gpl-3.0
| 11,995
|
#!coding: utf-8
"""
Usage:
main.py <host> <username> <password> [-r] [--port=<port>] [--hub=<hub>] [--pppoe-username=<username>] [--pppoe-password=<password>] [--output=<output>]
Options:
-h --help Show help
-r Change route to make connect to Packetix Server always use default gw
--env Get param from env
--port=<port> Packetix Server Port [default: 15555].
--hub=<hub> Packetix Server Hub [default: VPN]
--pppoe-username=<username> PPPoE username
--pppoe-password=<password> PPPoE password
--output=<output> output file
"""
import os
import sys
from docopt import docopt
import time
from command import Commander
import netifaces
import logging
def add_route(packetix_host):
import socket
from socket import AF_INET
from pyroute2 import IPRoute
gws = netifaces.gateways()
default_gw = gws['default'][netifaces.AF_INET]
    logging.info('default gw %s', default_gw)
dst_ip = socket.gethostbyname(packetix_host)
    logging.info('packetix server : %s', dst_ip)
ip = IPRoute()
ip.route(
'add',
dst=dst_ip,
gateway=default_gw[0],
metrics={
'mtu': 1500,
'hoplimit': 16
}
)
def writeconf(template, target, **kw):
    with open(template) as f:
        data = f.read()
    with open(target, "w+") as f:
        f.write(data.format(**kw))
    return True
def init_pppoe(args , status_loop = 10):
if args.get("--pppoe-username") is None:
logging.error("not have --pppoe-username")
return False,"pppoe param error"
if args.get('--pppoe-password') is None:
logging.error("not have --pppoe-password")
return False,"pppoe param error"
ok = writeconf("/etc/ppp/peers/dsl-provider.tp","/etc/ppp/peers/dsl-provider",username=args["--pppoe-username"])
if not ok:
logging.error("write conf /etc/ppp/peers/dsl-provider failed")
return False,"write conf /etc/ppp/peers/dsl-provider failed"
ok = writeconf("/etc/ppp/pap-secrets.tp","/etc/ppp/pap-secrets",username=args["--pppoe-username"],password=args['--pppoe-password'])
if not ok:
logging.error("write conf /etc/ppp/pap-secrets failed")
return False,"write conf /etc/ppp/pap-secrets failed"
c = Commander()
rd, ed = c.command2("pon dsl-provider")
if len(ed) > 0:
logging.error("pon failed")
return False,"pon failed"
for i in range(status_loop):
        ok,why = is_pppoe_connected()
logging.info("pppoe {ok}({why})".format(ok=ok,why=why))
if ok:
return True,why
time.sleep(1)
return False,"pppoe error"
def is_pppoe_connected():
c = Commander()
rd, ed = c.command2("plog")
for l in rd.split("\n"):
index = l.find("local IP address")
if index != -1:
ip = l[index+len("local IP address"):].strip()
return True,ip
return False,"error"
def output(args,**kw):
output = args.get("--output")
if output is None:
return False
with open(output,"a+") as f:
for k,v in kw.items():
f.write("{key} : {value}\n".format(key=k,value=v))
f.write("\n")
return True
def init_vpn(args, status_loop = 10):
c = Commander()
rd, ed = c.command2("service rsyslog start")
if len(ed) > 0:
logging.error("start rsyslog failed")
return False,"start rsyslog failed"
rd, ed = c.command2("/opt/vpnclient/vpnclient start")
if len(ed) > 0:
logging.error("start vpnclient failed")
return False,"start vpnclient failed"
time.sleep(1)
ok, rd, ed = c.vpn_command("NicCreate p1")
if not ok:
logging.error("create nic failed")
return False,"create nic failed"
time.sleep(1)
if 'vpn_p1' not in netifaces.interfaces():
logging.error("create nic failed")
return False,"create nic failed"
ok, rd, ed = c.vpn_command(
"AccountCreate {username} /SERVER:{host}:{port} /HUB:VPN /USERNAME:{username} /NICNAME:p1".format(
username=args["<username>"],
host=args["<host>"],
port=15555,
)
)
if not ok:
logging.error("create account failed")
return False,"create account failed"
ok, rd, ed = c.vpn_command(
"AccountPasswordSet {username} /PASSWORD:{password} /TYPE:standard".format(
username=args["<username>"],
password=args["<password>"],
)
)
if not ok:
logging.error("account set password failed")
return False,"account set password failed"
ok, rd, ed = c.vpn_command(
"AccountConnect {username} ".format(
username=args["<username>"],
)
)
if not ok:
logging.error("connect failed")
return False,"connect failed"
for i in range(status_loop):
ok,why = is_vpn_connected()
logging.info("vpn connect %s (%s)"%(ok,why))
if ok:
return True,why
time.sleep(1)
return False,"vpn error"
def is_vpn_connected():
c = Commander()
ok, rd, ed = c.vpn_command(
"AccountStatusGet {username} ".format(
username=args["<username>"],
)
)
if not ok:
return False,"command not runing"
#print "\n".join(rd)
for l in rd:
if l.find("|") != -1:
key,value = l.split("|")
if key.find("Session Status") != -1 :
if value.find("Connection Completed (Session Established)") != -1:
return True,value
else:
return False,value
return False,"error"
if __name__ == '__main__':
args = docopt(__doc__)
logging.basicConfig(
level=logging.DEBUG,
format='%(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
#print args
if args.get("--env"):
if os.getenv("HOST"):
args['<host>'] = os.getenv("HOST")
if os.getenv("USERNAME"):
args['<username>'] = os.getenv("USERNAME")
if os.getenv("PASSWORD"):
args['<password>'] = os.getenv("PASSWORD")
if os.getenv("PPPOE_USERNAME"):
args['--pppoe-username'] = os.getenv("PPPOE_USERNAME")
if os.getenv("PPPOE_PASSWORD"):
args['--pppoe-password'] = os.getenv("PPPOE_PASSWORD")
print args
ok,why = init_vpn(args)
output(args,vpn=ok,status=why)
if ok:
ok,why = init_pppoe(args)
output(args,pppoe=ok,ip=why)
if args['-r']:
add_route(args['<host>'])
sys.exit()
|
zhongpei/softether-client
|
endpoints/main.py
|
Python
|
gpl-3.0
| 6,782
|
from PyQt5.QtCore import Qt
from ouf.filemodel.filemodelitem import FileModelItem, FileItemType
class FileSystemItem(FileModelItem):
def __init__(self, path):
super().__init__(FileItemType.filesystem, path)
def data(self, column, role=Qt.DisplayRole):
if column == 0:
if role == Qt.DisplayRole:
if self.is_root:
return _("File System")
return super().data(column, role)
|
cbrunet/ouf
|
src/ouf/filemodel/filesystemitem.py
|
Python
|
gpl-3.0
| 455
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Top Block
# Generated: Tue Dec 27 19:28:14 2016
##################################################
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import wxgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.wxgui import forms
from gnuradio.wxgui import scopesink2
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import wx
class top_block(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="Top Block")
##################################################
# Variables
##################################################
self.var = var = 11
self.samp_rate = samp_rate = 1e6
self.freq = freq = 1e3
##################################################
# Blocks
##################################################
_freq_sizer = wx.BoxSizer(wx.VERTICAL)
self._freq_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_freq_sizer,
value=self.freq,
callback=self.set_freq,
label='freq',
converter=forms.float_converter(),
proportion=0,
)
self._freq_slider = forms.slider(
parent=self.GetWin(),
sizer=_freq_sizer,
value=self.freq,
callback=self.set_freq,
minimum=0,
maximum=16e3,
num_steps=100,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.Add(_freq_sizer)
self.wxgui_scopesink2_0 = scopesink2.scope_sink_c(
self.GetWin(),
title='Scope Plot',
sample_rate=samp_rate,
v_scale=0,
v_offset=0,
t_scale=0,
ac_couple=False,
xy_mode=False,
num_inputs=1,
trig_mode=wxgui.TRIG_MODE_AUTO,
y_axis_label='Counts',
)
self.Add(self.wxgui_scopesink2_0.win)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, freq, 1, 0)
##################################################
# Connections
##################################################
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.wxgui_scopesink2_0, 0))
def get_var(self):
return self.var
def set_var(self, var):
self.var = var
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.wxgui_scopesink2_0.set_sample_rate(self.samp_rate)
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
self._freq_slider.set_value(self.freq)
self._freq_text_box.set_value(self.freq)
self.analog_sig_source_x_0.set_frequency(self.freq)
def main(top_block_cls=top_block, options=None):
tb = top_block_cls()
tb.Start(True)
tb.Wait()
if __name__ == '__main__':
main()
|
james-tate/gnuradio_projects
|
ettus_lab/lab1/top_block.py
|
Python
|
gpl-3.0
| 3,795
|
import random
from os.path import join, dirname
import numpy as np
from sklearn.base import ClassifierMixin, BaseEstimator
import fasttext as ft
from underthesea.util.file_io import write
import os
from underthesea.util.singleton import Singleton
class FastTextClassifier(ClassifierMixin, BaseEstimator):
def __init__(self):
self.estimator = None
def fit(self, X, y, model_filename=None):
"""Fit FastText according to X, y
        Parameters
----------
X : list of text
each item is a text
y: list
each item is either a label (in multi class problem) or list of
labels (in multi label problem)
"""
train_file = "temp.train"
X = [x.replace("\n", " ") for x in X]
y = [item[0] for item in y]
y = [_.replace(" ", "-") for _ in y]
lines = ["__label__{} , {}".format(j, i) for i, j in zip(X, y)]
content = "\n".join(lines)
write(train_file, content)
if model_filename:
self.estimator = ft.supervised(train_file, model_filename)
else:
self.estimator = ft.supervised(train_file)
os.remove(train_file)
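        # e.g. (illustrative) fit(["good movie"], [["positive"]]) trains on a
        # temp file containing the line "__label__positive , good movie"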
    def predict(self, X):
        # not implemented here; predict_proba is the supported entry point
        return
def predict_proba(self, X):
output_ = self.estimator.predict_proba(X)
def transform_item(item):
label, score = item[0]
label = label.replace("__label__", "")
label = int(label)
if label == 0:
label = 1
score = 1 - score
return [label, score]
output_ = [transform_item(item) for item in output_]
output1 = np.array(output_)
return output1
@Singleton
class FastTextPredictor:
def __init__(self):
filepath = join(dirname(__file__), "fasttext.model")
self.estimator = ft.load_model(filepath)
    def transform_output(self, y):
y = y[0].replace("__label__", "")
y = y.replace("-", " ")
return y
def predict(self, X):
X = [X]
y_pred = self.estimator.predict(X)
        y_pred = [self.transform_output(item) for item in y_pred]
return y_pred
|
rain1024/underthesea
|
underthesea/classification/model_fasttext.py
|
Python
|
gpl-3.0
| 2,185
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
PisteCreatorDockWidget_OptionDock
Option dock for Qgis plugins
Option dock initialize
-------------------
begin : 2017-07-25
last : 2017-10-20
copyright : (C) 2017 by Peillet Sebastien
email : peillet.seb@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
import os
from qgis.PyQt.QtGui import QColor
from qgis.PyQt.QtWidgets import QDialog
from qgis.PyQt import uic
from qgis.PyQt.QtCore import pyqtSignal, QSettings
from qgis.gui import QgsColorButton
def hex_to_rgb(value):
value = value.lstrip("#")
lv = len(value)
    return list(int(value[i : i + lv // 3], 16) for i in range(0, lv, lv // 3))
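# quick check (illustrative): hex_to_rgb("#00d003") -> [0, 208, 3]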
class OptionDock(QDialog):
closingPlugin = pyqtSignal()
def __init__(self, plugin, graph_widget, canvas, parent=None):
"""Constructor."""
super(OptionDock, self).__init__(parent)
uic.loadUi(os.path.join(os.path.dirname(__file__), "Option_dock.ui"), self)
self.settings = QSettings()
self.initConfig()
self.graph_widget = graph_widget
self.PisteCreatorTool = plugin.PisteCreatorTool
self.canvas = canvas
self.plugin = plugin
self.saveButton.clicked.connect(self.saveconfig)
def initConfig(self):
self.sideDistInt = self.settings.value(
"PisteCreator/calculation_variable/side_distance", 6
)
self.sideDistSpinBox.setValue(int(self.sideDistInt))
self.aslopeInt = self.settings.value(
"PisteCreator/graphical_visualisation/tolerated_a_slope", 10
)
self.toleratedASlopeSpinBox.setValue(int(self.aslopeInt))
self.cslopeInt = self.settings.value(
"PisteCreator/graphical_visualisation/tolerated_c_slope", 4
)
self.toleratedCSlopeSpinBox.setValue(int(self.cslopeInt))
self.lengthInt = self.settings.value(
"PisteCreator/graphical_visualisation/max_length", 50
)
self.maxLengthSpinBox.setValue(int(self.lengthInt))
self.lengthBool = self.settings.value(
"PisteCreator/graphical_visualisation/max_length_hold", False
)
self.maxLengthCheckBox.setChecked(bool(self.lengthBool))
self.swathInt = self.settings.value(
"PisteCreator/graphical_visualisation/swath_distance", 30
)
self.swathDistSpinBox.setValue(int(self.swathInt))
self.swathBool = self.settings.value(
"PisteCreator/graphical_visualisation/swath_display", True
)
self.swathDistCheckBox.setChecked(bool(self.swathBool))
self.interpolBool = self.settings.value(
"PisteCreator/calculation_variable/interpolate_act", True
)
self.interpolCheckBox.setChecked(bool(self.interpolBool))
self.t_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/t_color", "#00d003"
)
)
self.f_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/f_color", "#ff0000"
)
)
self.tl_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/tl_color", "#236433"
)
)
self.fl_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/fl_color", "#b80000"
)
)
self.b_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/b_color", "#0fff33"
)
)
self.a_color = QColor(
self.settings.value(
"PisteCreator/graphical_visualisation/a_color", "#48b0d2"
)
)
self.T_ColorButton.setColor(self.t_color)
self.F_ColorButton.setColor(self.f_color)
self.TL_ColorButton.setColor(self.tl_color)
self.FL_ColorButton.setColor(self.fl_color)
self.B_ColorButton.setColor(self.b_color)
self.A_ColorButton.setColor(self.a_color)
def saveconfig(self):
# self.checkChanges()
self.sideDistInt = self.sideDistSpinBox.value()
self.aslopeInt = self.toleratedASlopeSpinBox.value()
self.cslopeInt = self.toleratedCSlopeSpinBox.value()
self.lengthInt = self.maxLengthSpinBox.value()
self.lengthBool = self.maxLengthCheckBox.isChecked()
self.swathInt = self.swathDistSpinBox.value()
self.swathBool = self.swathDistCheckBox.isChecked()
self.interpolBool = self.interpolCheckBox.isChecked()
self.t_color = self.T_ColorButton.color().name()
self.f_color = self.F_ColorButton.color().name()
self.tl_color = self.TL_ColorButton.color().name()
self.fl_color = self.FL_ColorButton.color().name()
self.a_color = self.A_ColorButton.color().name()
self.b_color = self.B_ColorButton.color().name()
self.settings.setValue(
"PisteCreator/calculation_variable/side_distance",
self.sideDistSpinBox.value(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/tolerated_a_slope",
self.toleratedASlopeSpinBox.value(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/tolerated_c_slope",
self.toleratedCSlopeSpinBox.value(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/max_length",
self.maxLengthSpinBox.value(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/max_length_hold",
self.maxLengthCheckBox.isChecked(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/swath_distance",
self.swathDistSpinBox.value(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/swath_display",
self.swathDistCheckBox.isChecked(),
)
self.settings.setValue(
"PisteCreator/calculation_variable/interpolate_act",
self.interpolCheckBox.isChecked(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/t_color",
self.T_ColorButton.color().name(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/f_color",
self.F_ColorButton.color().name(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/tl_color",
self.TL_ColorButton.color().name(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/fl_color",
self.FL_ColorButton.color().name(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/b_color",
self.B_ColorButton.color().name(),
)
self.settings.setValue(
"PisteCreator/graphical_visualisation/a_color",
self.A_ColorButton.color().name(),
)
try:
if self.canvas.mapTool().map_tool_name == "SlopeMapTool":
self.plugin.PisteCreatorTool.configChange(
self.sideDistInt,
self.aslopeInt,
self.cslopeInt,
self.lengthInt,
self.lengthBool,
self.swathInt,
self.swathBool,
self.interpolBool,
self.t_color,
self.f_color,
self.tl_color,
self.fl_color,
self.b_color,
self.a_color,
)
except AttributeError:
pass
self.close()
def closeEvent(self, event):
self.closingPlugin.emit()
event.accept()
|
SebastienPeillet/PisteCreator
|
gui/option_Dock.py
|
Python
|
gpl-3.0
| 8,879
|
# COPYRIGHT: Robosub Club of the Palouse under the GPL v3
import argparse
import time
import os
import sys
from copy import deepcopy
from random import random
sys.path.append(os.path.abspath("../.."))
from util.communication.grapevine import Communicator
# TODO: This module should take the fuzzy sets produced by
# movement/stabilization and should translate them into raw digital
# values that can be sent over the serial interface.
# microcontroller_interface.py currently does much of this processing,
# but it shouldn't. microcontroller_interface.py should figure out how
# to send data over the serial interface and how to receive data over
# the serial interface. Anything that is beyond that scope, such as
# translating a magnitude into a raw value, should be moved into this
# module.
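# A minimal sketch of the translation described above (hypothetical helper,
# not part of the current module):
#
#   def magnitude_to_raw(magnitude, max_raw=255):
#       """Clamp a [-1.0, 1.0] fuzzy magnitude and scale it to a raw value."""
#       return int(max(-1.0, min(1.0, magnitude)) * max_raw)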
def main(args):
com = Communicator("movement/physical")
last_packet_time = 0.0
while True:
rx_packet = com.get_last_message("movement/stabilization")
if rx_packet and rx_packet['timestamp'] > last_packet_time:
last_packet_time = rx_packet['timestamp']
tx_packet = {
'vector': rx_packet['vector'],
'rotation': rx_packet['rotation']}
com.publish_message(tx_packet)
time.sleep(args.epoch)
def commandline():
parser = argparse.ArgumentParser(description='Mock module.')
parser.add_argument('-e', '--epoch', type=float,
default=0.05,
help='Sleep time per cycle.')
return parser.parse_args()
if __name__ == '__main__':
args = commandline()
main(args)
|
pi19404/robosub-1
|
src/movement/physical/fuzzy_logic_defuzzifier.py
|
Python
|
gpl-3.0
| 1,604
|
# coding:utf8
"""
Unable to import from the parent (upper-level) directory.
"""
class UnableTest(object):
pass
|
unlessbamboo/grocery-shop
|
language/python/src/package/abs_import/unable/unable_module.py
|
Python
|
gpl-3.0
| 104
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/chernomirdinmacuvele/Documents/workspace/PscArt2.0.X/UserInt/ui_codificadores_POT.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(306, 332)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(Form)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.LECodigo = QtWidgets.QLineEdit(Form)
self.LECodigo.setMaxLength(3)
self.LECodigo.setObjectName("LECodigo")
self.gridLayout.addWidget(self.LECodigo, 0, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(Form)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
self.LENome = QtWidgets.QLineEdit(Form)
self.LENome.setMaxLength(15)
self.LENome.setObjectName("LENome")
self.gridLayout.addWidget(self.LENome, 1, 1, 1, 1)
self.label_4 = QtWidgets.QLabel(Form)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 2, 0, 1, 1)
self.PTEDescricao = QtWidgets.QPlainTextEdit(Form)
self.PTEDescricao.setObjectName("PTEDescricao")
self.gridLayout.addWidget(self.PTEDescricao, 2, 1, 1, 1)
self.label_5 = QtWidgets.QLabel(Form)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 3, 0, 1, 1)
self.PTEComentarios = QtWidgets.QPlainTextEdit(Form)
self.PTEComentarios.setObjectName("PTEComentarios")
self.gridLayout.addWidget(self.PTEComentarios, 3, 1, 1, 1)
self.CHBActivo = QtWidgets.QCheckBox(Form)
self.CHBActivo.setObjectName("CHBActivo")
self.gridLayout.addWidget(self.CHBActivo, 4, 1, 1, 1)
self.splitter = QtWidgets.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.PBGuardar = QtWidgets.QPushButton(self.splitter)
self.PBGuardar.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/newPrefix/Icons/002-save.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.PBGuardar.setIcon(icon)
self.PBGuardar.setObjectName("PBGuardar")
self.PBCancelar = QtWidgets.QPushButton(self.splitter)
self.PBCancelar.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/newPrefix/Icons/003-error.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.PBCancelar.setIcon(icon1)
self.PBCancelar.setObjectName("PBCancelar")
self.gridLayout.addWidget(self.splitter, 5, 0, 1, 2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Codificador"))
self.label.setText(_translate("Form", "Codigo:"))
self.LECodigo.setPlaceholderText(_translate("Form", "Ex:AAA"))
self.label_3.setText(_translate("Form", "Nome:"))
self.LENome.setPlaceholderText(_translate("Form", "Ex:Qualquer Coisa"))
self.label_4.setText(_translate("Form", "Descricao:"))
self.PTEDescricao.setPlaceholderText(_translate("Form", "Ex:O que faz…"))
self.label_5.setText(_translate("Form", "Comentarios:"))
self.PTEComentarios.setPlaceholderText(_translate("Form", "Ex:Nota, Obs…"))
self.CHBActivo.setText(_translate("Form", "Activo"))
import icons_rc
|
InUrSys/PescArt2.0
|
GeneratedFiles/ui_codificadores_POT.py
|
Python
|
gpl-3.0
| 3,813
|
from __future__ import unicode_literals
from zipfile import ZipFile
import decimal
import datetime
from xml.dom.minidom import parseString
from . import ods_components
from .formula import Formula
# Basic compatibility setup for Python 2 and Python 3.
try:
long
except NameError:
long = int
try:
unicode
except NameError:
unicode = str
# End compatibility setup.
class ODSWriter(object):
"""
    Utility for writing OpenDocument Spreadsheets. Can be used in simple one-sheet mode (use writerow/writerows) or
    with multiple sheets (use new_sheet). It is suggested that you use this object as a context manager.
"""
def __init__(self, odsfile):
self.zipf = ZipFile(odsfile, "w")
# Make the skeleton of an ODS.
self.dom = parseString(ods_components.content_xml)
self.zipf.writestr("mimetype",
ods_components.mimetype.encode("utf-8"))
self.zipf.writestr("META-INF/manifest.xml",
ods_components.manifest_xml.encode("utf-8"))
self.zipf.writestr("styles.xml",
ods_components.styles_xml.encode("utf-8"))
self.default_sheet = None
self.sheets = []
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
def close(self):
"""
        Finalises the compressed version of the spreadsheet. If you aren't using the context manager ('with' statement),
        you must call this manually; it is not triggered automatically like on a file object.
:return: Nothing.
"""
self.zipf.writestr("content.xml", self.dom.toxml().encode("utf-8"))
self.zipf.close()
def writerow(self, cells):
"""
Write a row of cells into the default sheet of the spreadsheet.
:param cells: A list of cells (most basic Python types supported).
:return: Nothing.
"""
if self.default_sheet is None:
self.default_sheet = self.new_sheet()
self.default_sheet.writerow(cells)
def writerows(self, rows):
"""
Write rows into the default sheet of the spreadsheet.
:param rows: A list of rows, rows are lists of cells - see writerow.
:return: Nothing.
"""
for row in rows:
self.writerow(row)
def new_sheet(self, name=None, cols=None):
"""
Create a new sheet in the spreadsheet and return it so content can be added.
:param name: Optional name for the sheet.
:param cols: Specify the number of columns, needed for compatibility in some cases
:return: Sheet object
"""
sheet = Sheet(self.dom, name, cols)
self.sheets.append(sheet)
return sheet
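# Usage sketch (multiple sheets), assuming a binary-mode file object:
#   with ODSWriter(open("out.ods", "wb")) as odsfile:
#       sheet = odsfile.new_sheet("Data")
#       sheet.writerow(["label", 42, True])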
class Sheet(object):
def __init__(self, dom, name="Sheet 1", cols=None):
self.dom = dom
self.cols = cols
spreadsheet = self.dom.getElementsByTagName("office:spreadsheet")[0]
self.table = self.dom.createElement("table:table")
if name:
self.table.setAttribute("table:name", name)
self.table.setAttribute("table:style-name", "ta1")
if self.cols is not None:
col = self.dom.createElement("table:table-column")
col.setAttribute("table:number-columns-repeated", unicode(self.cols))
self.table.appendChild(col)
spreadsheet.appendChild(self.table)
def writerow(self, cells):
row = self.dom.createElement("table:table-row")
content_cells = len(cells)
if self.cols is not None:
padding_cells = self.cols - content_cells
if content_cells > self.cols:
raise Exception("More cells than cols.")
cells += [None]*padding_cells
for cell_data in cells:
cell = self.dom.createElement("table:table-cell")
text = None
if isinstance(cell_data, (datetime.date, datetime.datetime)):
cell.setAttribute("office:value-type", "date")
date_str = cell_data.isoformat()
cell.setAttribute("office:date-value", date_str)
cell.setAttribute("table:style-name", "cDateISO")
text = date_str
elif isinstance(cell_data, datetime.time):
cell.setAttribute("office:value-type", "time")
cell.setAttribute("office:time-value",
cell_data.strftime("PT%HH%MM%SS"))
cell.setAttribute("table:style-name", "cTime")
text = cell_data.strftime("%H:%M:%S")
elif isinstance(cell_data, bool):
# Bool condition must be checked before numeric because:
# isinstance(True, int): True
# isinstance(True, bool): True
cell.setAttribute("office:value-type", "boolean")
cell.setAttribute("office:boolean-value",
"true" if cell_data else "false")
cell.setAttribute("table:style-name", "cBool")
text = "TRUE" if cell_data else "FALSE"
elif isinstance(cell_data, (float, int, decimal.Decimal, long)):
cell.setAttribute("office:value-type", "float")
float_str = unicode(cell_data)
cell.setAttribute("office:value", float_str)
text = float_str
elif isinstance(cell_data, Formula):
cell.setAttribute("table:formula", str(cell_data))
elif cell_data is None:
pass # Empty element
else:
# String and unknown types become string cells
cell.setAttribute("office:value-type", "string")
text = unicode(cell_data)
if text:
p = self.dom.createElement("text:p")
p.appendChild(self.dom.createTextNode(text))
cell.appendChild(p)
row.appendChild(cell)
self.table.appendChild(row)
def writerows(self, rows):
for row in rows:
self.writerow(row)
def writer(odsfile, *args, **kwargs):
"""
Returns an ODSWriter object.
Python 3: Make sure that the file you pass is mode b:
f = open("spreadsheet.ods", "wb")
odswriter.writer(f)
...
Otherwise you will get "TypeError: must be str, not bytes"
"""
return ODSWriter(odsfile, *args, **kwargs)
|
jeremyk6/qgeric
|
odswriter/__init__.py
|
Python
|
gpl-3.0
| 6,498
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 09 10:36:56 2017
@author: Nzix
"""
import urllib,urllib2,re,datetime,time,random,xlrd,xlwt,os
cookie = ""
def ticketcheck(membercode,serial_code_1,serial_code_2):
timeout = 3
global cookie
if type(membercode) is int:
membercode = str(membercode)
teamcode = membercode[0] + "0" + membercode[1]
link = "http://akb48-sousenkyo.jp/vote.php?membercode=%s&parent=team&parentkey=%s"%(membercode,teamcode)
attempt = 0
result = ""
proxy = 0
while True:
attempt = attempt + 1
if attempt > 3:
break
headers={}
headers["Host"] = "akb48-sousenkyo.jp"
headers["User-Agent"] = "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_2 like Mac OS X) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.0 Mobile/14F89 Safari/602.1"
if cookie != "":
headers["Cookie"] = cookie
access = urllib2.Request(url=link, headers=headers)
reconnect = 5
while True:
try:
response = urllib2.urlopen(access,timeout=timeout)
except:
reconnect = reconnect - 1
if reconnect <= 0:
exit()
else:
break
if "set-cookie" in response.headers:
cookie = response.headers["set-cookie"]
votepage = response.read().decode('shift-jis').encode('utf-8')
data = {}
data["serial_code_1"] = serial_code_1
data["serial_code_2"] = serial_code_2
form = re.findall(r'<input type="hidden" name="([^"]+)" value="([^"]*)"',votepage)
for item in form:
if item[1] != "":
data[item[0]] = item[1]
data = urllib.urlencode(data)
headers["Cookie"] = cookie
headers["Content-Type"] = "application/x-www-form-urlencoded"
headers["Origin"] = "http://akb48-sousenkyo.jp"
headers["Referer"] = link
time.sleep(0.5)
submit = urllib2.Request(url="http://akb48-sousenkyo.jp/vote_thanks.php",data=data,headers=headers)
try:
response = urllib2.urlopen(submit,timeout=timeout)
except:
result = u"ネットワークタイムアウト"
continue
if response.geturl() != "http://akb48-sousenkyo.jp/vote_thanks.php":
result = u"お客様の端末は非推奨です"
continue
statuspage = response.read().decode('shift-jis').encode('utf-8')
message = re.search(r'<p class="mb20">([\s\S]+?)</p>',statuspage).group(1)
message = re.sub(r'\s*',"",message)
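        # The branches below match the site's Japanese status text; rough English
        # glosses: "serial number is incorrect" / "already voted (timestamp
        # returned)" / "serial invalid or already voted" / "thank you for voting".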
if message.find('シリアルナンバーに誤りがあります。ご確認ください。') != -1:
result = u"シリアルナンバーに誤りがあります"
break
elif message.find('入力されたシリアルナンバーは、既に投票されています。') != -1:
timestr = re.search(r'投票日時:\d{4}年\d{2}月\d{2}日\d{2}時\d{2}分\d{2}秒',message).group(0)
result = timestr.decode("utf-8")
break
elif message.find('入力されたシリアルナンバーは無効であるか既に投票済みです。') != -1:
result = u"入力されたシリアルナンバーは無効であるか既に投票済みです"
break
elif message.find('ご投票いただきありがとうございました。') != -1:
result = u"ご投票いただきありがとうございました"
proxy = 1
attempt = 1
continue
else:
result = message.decode("utf-8")
break
return (result,proxy)
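# ticketcheck returns (message, flag): message is the site's status text
# (Japanese, as above) and flag is set to 1 only after a successful vote.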
allfiles = os.listdir('./xls/')
if len(allfiles) == 0:
print "no tickets need to check"
exit()
if os.path.exists("./output/") == False:
os.mkdir("./output/")
for filename in allfiles:
tickets = []
data = xlrd.open_workbook('./xls/' + filename)
sheet1 = data.sheets()[0]
rows = sheet1.nrows
columns = sheet1.ncols
offsetx = 0
offsety = 0
for i in xrange(0,rows*columns):
if type(sheet1.cell_value(i//columns,i%columns)) is unicode:
noblankcell = re.sub(u'[\s|(\u3000)|(\xa0)]*',"",sheet1.cell_value(i//columns,i%columns))
if re.search(r'^\w{8}$',noblankcell) != None:
offsetx = i//columns
offsety = i%columns
break
for r in xrange(offsetx,rows):
row = []
if type(sheet1.row_values(r)[offsety]) is unicode and type(sheet1.row_values(r)[offsety + 1]) is unicode:
noblankcell1 = re.sub(u'[\s|(\u3000)|(\xa0)]*',"",sheet1.row_values(r)[offsety])
noblankcell2 = re.sub(u'[\s|(\u3000)|(\xa0)]*',"",sheet1.row_values(r)[offsety + 1])
if re.search(r'^\w{8}$',noblankcell1)!=None and re.search(r'^\w{8}$',noblankcell2)!=None:
row.append(noblankcell1)
row.append(noblankcell2)
tickets.append(row)
else:
if noblankcell1 != "" and noblankcell2 != "":
row.append(noblankcell1)
row.append(noblankcell2)
row.append(u'シリアルナンバーはそれぞれ8桁ずつ半角英数字で入力してください')
tickets.append(row)
print "[error]",filename,"line",r+1," ",noblankcell1,noblankcell2
else:
print "[error]",filename,"line",r+1," ",sheet1.row_values(r)[offsety],sheet1.row_values(r)[offsety + 1]
total = len(tickets)
bits = len(str(total))
print "%s total %d"%(filename,total)
output = []
for t in xrange(0,total):
serial_code_1 = tickets[t][0]
serial_code_2 = tickets[t][1]
if len(tickets[t])==3:
output.append([serial_code_1,serial_code_2,tickets[t][2],u"否"])
continue
result = ticketcheck(1307,serial_code_1,serial_code_2)
if result[1] == 1:
miss = u"是"
elif result[1] == 0:
miss = u"否"
print str(t+1).zfill(bits)," ",serial_code_1,serial_code_2," ",result[0],miss
output.append([serial_code_1,serial_code_2,result[0],miss])
workbook = xlwt.Workbook()
sheet1 = workbook.add_sheet('sheet1',cell_overwrite_ok=True)
style = xlwt.XFStyle()
font = xlwt.Font()
font.name = u'等线'
# font.colour_index = 2 #red
    font.height = 14 * 20 # 14 point (xlwt height is points * 20)
font.bold = False
for i in xrange(0,total):
if output[i][3] == u'否' and re.search(u'投票日時',output[i][2]) != None:
font.colour_index = 0
else:
font.colour_index = 2
style.font = font
sheet1.write(i,0,i+1,style)
sheet1.write(i,1,output[i][0],style)
sheet1.write(i,2,output[i][1],style)
sheet1.write(i,3,output[i][2],style)
sheet1.write(i,4,output[i][3],style)
sheet1.write(i,5,u'nondanee',style)
sheet1.col(0).width = 256 * 8
sheet1.col(1).width = 256 * 14
sheet1.col(2).width = 256 * 14
sheet1.col(3).width = 256 * 56
sheet1.col(4).width = 256 * 8
sheet1.col(5).width = 256 * 15
namepart = filename.split(".")[0]
workbook.save('./output/' + namepart + "-checked.xls")
print "Done"
|
nondanee/sousenkyo-auto
|
2017/formatscript.py
|
Python
|
gpl-3.0
| 7,489
|
# -*- coding: utf-8 -*-
#
# testgdt documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 17:11:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'testgdt'
copyright = u'2012, gdt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'templateclassdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'testgdt.tex', u'testgdt Documentation',
u'gdt', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'templateclass', u'testgdt Documentation',
[u'gdt'], 1)
]
|
UCL-CERU/CESMapper
|
CESMapper/conf.py
|
Python
|
gpl-3.0
| 7,012
|
# -*- coding: utf-8 -*-
"""PEP 440 verschemes tests"""
import unittest
from verschemes.pep440 import Pep440Version
class Pep440VersionTestCase(unittest.TestCase):
def test_one_segment(self):
version = Pep440Version(release1=4)
self.assertEqual("4", str(version))
self.assertEqual(0, version.epoch)
self.assertEqual(4, version.release1)
self.assertEqual(0, version.release2)
self.assertEqual(0, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(None, version.pre_release)
self.assertEqual(None, version.post_release)
self.assertEqual(None, version.development)
def test_two_segments(self):
version = Pep440Version(release1=8, release2=2)
self.assertEqual("8.2", str(version))
self.assertEqual(8, version.release1)
self.assertEqual(2, version.release2)
def test_three_segments(self):
version = Pep440Version(None, 3, 11, 8)
self.assertEqual("3.11.8", str(version))
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
def test_four_segments(self):
version = Pep440Version(release1=7, release2=1, release3=26,
release4=5)
self.assertEqual("7.1.26.5", str(version))
self.assertEqual(7, version.release1)
self.assertEqual(1, version.release2)
self.assertEqual(26, version.release3)
self.assertEqual(5, version.release4)
def test_epoch(self):
version = Pep440Version(4, 3, 11, 8)
self.assertEqual("4!3.11.8", str(version))
self.assertEqual(4, version.epoch)
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
def test_pre_release(self):
version = Pep440Version(None, 3, 11, 8, pre_release=('a', 2))
self.assertEqual("3.11.8a2", str(version))
self.assertEqual(0, version.epoch)
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(('a', 2), version.pre_release)
self.assertEqual('a', version.pre_release.level)
self.assertEqual(2, version.pre_release.serial)
self.assertEqual(None, version.post_release)
self.assertEqual(None, version.development)
    def test_post_release(self):
        version = Pep440Version(None, 3, 11, 8, post_release=2)
        self.assertEqual("3.11.8.post2", str(version))
        self.assertEqual(0, version.epoch)
        self.assertEqual(3, version.release1)
        self.assertEqual(11, version.release2)
        self.assertEqual(8, version.release3)
        self.assertEqual(0, version.release4)
        self.assertEqual(0, version.release5)
        self.assertEqual(0, version.release6)
        self.assertEqual(None, version.pre_release)
        self.assertEqual(2, version.post_release)
        self.assertEqual(None, version.development)
def test_pre_and_post_release(self):
version = Pep440Version(2, 3, 11, 8, pre_release=('a', 2),
post_release=4)
self.assertEqual("2!3.11.8a2.post4", str(version))
self.assertEqual(2, version.epoch)
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(('a', 2), version.pre_release)
self.assertEqual('a', version.pre_release.level)
self.assertEqual(2, version.pre_release.serial)
self.assertEqual(4, version.post_release)
self.assertEqual(None, version.development)
def test_development(self):
version = Pep440Version(release1=2112, development=90125)
self.assertEqual("2112.dev90125", str(version))
self.assertEqual(0, version.epoch)
self.assertEqual(2112, version.release1)
self.assertEqual(0, version.release2)
self.assertEqual(0, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(None, version.pre_release)
self.assertEqual(None, version.post_release)
self.assertEqual(90125, version.development)
def test_pre_release_and_development(self):
version = Pep440Version(None, 3, 11, 8, pre_release=('a', 2),
development=36)
self.assertEqual("3.11.8a2.dev36", str(version))
self.assertEqual(0, version.epoch)
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(('a', 2), version.pre_release)
self.assertEqual('a', version.pre_release.level)
self.assertEqual(2, version.pre_release.serial)
self.assertEqual(None, version.post_release)
self.assertEqual(36, version.development)
def test_pre_and_post_release_and_development(self):
version = Pep440Version(1, 3, 11, 8, pre_release=('a', 2),
post_release=5, development=74)
self.assertEqual("1!3.11.8a2.post5.dev74", str(version))
self.assertEqual(1, version.epoch)
self.assertEqual(3, version.release1)
self.assertEqual(11, version.release2)
self.assertEqual(8, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(('a', 2), version.pre_release)
self.assertEqual('a', version.pre_release.level)
self.assertEqual(2, version.pre_release.serial)
self.assertEqual(5, version.post_release)
self.assertEqual(74, version.development)
def test_development_only(self):
version = Pep440Version(development=666)
self.assertEqual("0.dev666", str(version))
self.assertEqual(0, version.epoch)
self.assertEqual(0, version.release1)
self.assertEqual(0, version.release2)
self.assertEqual(0, version.release3)
self.assertEqual(0, version.release4)
self.assertEqual(0, version.release5)
self.assertEqual(0, version.release6)
self.assertEqual(None, version.pre_release)
self.assertEqual(None, version.post_release)
self.assertEqual(666, version.development)
def test_init_string(self):
version = Pep440Version("6.48.2")
self.assertEqual(0, version.epoch)
self.assertEqual(6, version.release1)
self.assertEqual(48, version.release2)
self.assertEqual(2, version.release3)
self.assertEqual("6.48.2", str(version))
def test_init_string_epoch(self):
version = Pep440Version("1!6.48.2")
self.assertEqual(1, version.epoch)
self.assertEqual(6, version.release1)
self.assertEqual(48, version.release2)
self.assertEqual(2, version.release3)
self.assertEqual("1!6.48.2", str(version))
def test_init_string_alpha_separator(self):
indicators = ("a", "A", "alpha", "alpHA", "Alpha", "AlPHa", "ALPHA")
expected = "1.2a3"
for indicator in indicators:
self.assertEqual(expected,
str(Pep440Version("1.2{}3".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2.{}3".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2-{}3".format(indicator))))
expected = "1.2a0"
for indicator in indicators:
self.assertEqual(expected,
str(Pep440Version("1.2{}".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2.{}".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2-{}".format(indicator))))
def test_init_string_beta_separator(self):
indicators = ("b", "B", "beta", "beTA", "Beta", "BeTa", "BETA")
expected = "1.2b3"
for indicator in indicators:
self.assertEqual(expected,
str(Pep440Version("1.2{}3".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2.{}3".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2-{}3".format(indicator))))
expected = "1.2b0"
for indicator in indicators:
self.assertEqual(expected,
str(Pep440Version("1.2{}".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2.{}".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2-{}".format(indicator))))
def test_init_string_release_candidate_separator(self):
indicators = ("c", "C", "rc", "RC", "rC", "Rc")
expected = "1.2c3"
for indicator in indicators:
self.assertEqual(expected,
str(Pep440Version("1.2{}3".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2.{}3".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2-{}3".format(indicator))))
expected = "1.2c0"
for indicator in indicators:
self.assertEqual(expected,
str(Pep440Version("1.2{}".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2.{}".format(indicator))))
self.assertEqual(expected,
str(Pep440Version("1.2-{}".format(indicator))))
def test_init_string_post_release_separator(self):
expected = "1.2.post3"
self.assertEqual(expected, str(Pep440Version("1.2post3")))
self.assertEqual(expected, str(Pep440Version("1.2.post3")))
self.assertEqual(expected, str(Pep440Version("1.2-post3")))
expected = "1.2.post0"
self.assertEqual(expected, str(Pep440Version("1.2post")))
self.assertEqual(expected, str(Pep440Version("1.2.post")))
self.assertEqual(expected, str(Pep440Version("1.2-post")))
def test_init_string_development_separator(self):
expected = "1.2.dev3"
self.assertEqual(expected, str(Pep440Version("1.2dev3")))
self.assertEqual(expected, str(Pep440Version("1.2.dev3")))
self.assertEqual(expected, str(Pep440Version("1.2-dev3")))
expected = "1.2.dev0"
self.assertEqual(expected, str(Pep440Version("1.2dev")))
self.assertEqual(expected, str(Pep440Version("1.2.dev")))
self.assertEqual(expected, str(Pep440Version("1.2-dev")))
def test_defaulted_segments(self):
version = Pep440Version(release4=5)
self.assertEqual("0.0.0.5", str(version))
def test_render(self):
version = Pep440Version(development=42)
self.assertEqual(str(version), version.render())
self.assertEqual("0.dev42", version.render())
self.assertEqual("0.0.dev42", version.render(min_release_segments=2))
self.assertEqual("0.0.0.dev42", version.render(min_release_segments=3))
self.assertEqual("0!0.0.0.0.0.0.dev42",
version.render(exclude_defaults=False))
version = version.replace(epoch=0)
self.assertEqual("0!0.0.dev42", version.render(min_release_segments=2))
def test_render_exclude_defaults_callback_scope(self):
version = Pep440Version()
self.assertTrue(version._render_exclude_defaults_callback(0, [1, 2]))
self.assertFalse(version._render_exclude_defaults_callback(1, [1, 2]))
def test_is_release(self):
version = Pep440Version(development=42)
self.assertFalse(version.is_release)
version = Pep440Version(None, 4, 2)
self.assertTrue(version.is_release)
version = Pep440Version(release4=11)
self.assertTrue(version.is_release)
|
gnuworldman/verschemes
|
tests/test_pep440.py
|
Python
|
gpl-3.0
| 12,943
|
import sys
from mercurial import hg, node, ui
def main():
"""print (possibly remote) heads
Prints a series of lines consisting of hashes and branch names.
Specify a local or remote repository, defaulting to the configured remote.
"""
repo = sys.argv[1]
other = hg.peer(ui.ui(), {}, repo)
for tag, heads in other.branchmap().iteritems():
print "%s %s" % (node.short(heads[0]), tag)
if __name__ == "__main__":
main()
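# Example output (sketch): one "<short-hash> <branch>" line per branch head,
# e.g. "a1b2c3d4e5f6 default"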
|
kfirprods/tpp
|
python/hg-rheads.py
|
Python
|
gpl-3.0
| 482
|
import lorun
import os
import codecs
import random
import subprocess
import config
import sys
RESULT_MAP = [
2, 10, 5, 4, 3, 6, 11, 7, 12
]
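# RESULT_MAP translates lorun's result codes (list index) into this judge's
# status codes (list value); the exact status numbering is assumed to come
# from the OJ backend and is not documented here.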
class Runner:
def __init__(self):
return
def compile(self, judger, srcPath, outPath):
cmd = config.langCompile[judger.lang] % {'root': sys.path[0], 'src': srcPath, 'target': outPath}
p = subprocess.Popen(cmd, shell = True,
stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.STDOUT)
retval = p.wait()
return (retval, p.stdout.read())
def judge(self, judger, srcPath, outPath, inFile, ansFile, memlimit, timelimit):
cmd = config.langRun[judger.lang] % {'src': srcPath, 'target': outPath}
fout_path = "".join([sys.path[0], "/", "%s/%d.out" % (config.dataPath["tempPath"], random.randint(0, 65536))])
if os.path.exists(fout_path):
os.remove(fout_path)
fin = open(inFile, 'rU')
fout = open(fout_path, 'w')
runcfg = {
'args': cmd.split(" "),
'fd_in': fin.fileno(),
'fd_out': fout.fileno(),
'timelimit': int(timelimit),
'memorylimit': int(memlimit)
}
rst = lorun.run(runcfg)
fin.close()
fout.close()
if rst['result'] == 0:
fans = open(ansFile, 'rU')
fout = open(fout_path, 'rU')
crst = lorun.check(fans.fileno(), fout.fileno())
fout.close()
fans.close()
return (RESULT_MAP[crst], int(rst['memoryused']), int(rst['timeused']))
return (RESULT_MAP[rst['result']], 0, 0)
|
SkyZH/CloudOJWatcher
|
ojrunnerlinux.py
|
Python
|
gpl-3.0
| 1,643
|
import os
import traceback
import json
import requests
from flask import Flask, request
from cities_list import CITIES
from messages import get_message, search_keyword
token = os.environ.get('FB_ACCESS_TOKEN')
api_key = os.environ.get('WEATHER_API_KEY')
app = Flask(__name__)
def location_quick_reply(sender, text=None):
if not text:
text = get_message('location-button')
return {
"recipient": {
"id": sender
},
"message": {
"text": text,
"quick_replies": [
{
"content_type": "location",
}
]
}
}
def send_attachment(sender, type, payload):
return {
"recipient": {
"id": sender
},
"message": {
"attachment": {
"type": type,
"payload": payload,
}
}
}
def send_text(sender, text):
return {
"recipient": {
"id": sender
},
"message": {
"text": text
}
}
def send_message(payload):
requests.post('https://graph.facebook.com/v2.6/me/messages/?access_token=' + token, json=payload)
def send_weather_info(sender, **kwargs):
latitude = kwargs.pop('latitude', None)
longitude = kwargs.pop('longitude', None)
city_name = kwargs.pop('city_name', None)
if latitude and longitude:
query = 'lat={}&lon={}'.format(latitude, longitude)
elif city_name:
query = 'q={},br'.format(city_name.title())
url = 'http://api.openweathermap.org/data/2.5/weather?' \
'{}&appid={}&units={}&lang={}'.format(query,
api_key,
'metric',
'pt')
r = requests.get(url)
response = r.json()
print(response)
if 'cod' in response:
if response['cod'] != 200:
return 'error'
name = response['name']
weather = response['main']
wind = response['wind']
elements = [{
'title': name,
'subtitle': 'Temperatura: {} graus'.format(str(weather['temp']).replace('.',',')),
'image_url': 'https://cdn-images-1.medium.com/max/800/1*LkbHjhacSRDNDzupX7pgEQ.jpeg'
}]
for info in response['weather']:
description = info['description'].capitalize()
icon = info['icon']
weather_data = 'Umidade: {}%\n' \
'Pressão: {}\n' \
'Velocidade do vento: {}'.format(weather['humidity'],
weather['pressure'],
wind['speed'])
if 'visibility' in response:
weather_data = '{}\n Visibilidade: {}'.format(weather_data, response['visibility'])
elements.append({
'title': description,
'subtitle': weather_data,
'image_url': 'http://openweathermap.org/img/w/{}.png'.format(icon)
})
payload = send_attachment(sender,
'template',
{
"template_type": "list",
"top_element_style": "large",
"elements": elements,
"buttons": [
{
"title": "Fazer nova pesquisa",
"type": "postback",
"payload": "do_it_again"
}
]
})
send_message(payload)
return None
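# Usage sketch: send_weather_info(sender, latitude=-30.03, longitude=-51.23)
# or send_weather_info(sender, city_name="Porto Alegre"); returns 'error' on a
# non-200 API response, None on success.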
@app.route('/', methods=['GET', 'POST'])
def webhook():
if request.method == 'POST':
try:
data = json.loads(request.data.decode())
sender = data['entry'][0]['messaging'][0]['sender']['id']
print(data)
if 'message' in data['entry'][0]['messaging'][0]:
message = data['entry'][0]['messaging'][0]['message']
if 'postback' in data['entry'][0]['messaging'][0]:
# Action when user first enters the chat
payload = data['entry'][0]['messaging'][0]['postback']['payload']
if payload == 'begin_button':
message = send_text(sender, 'Olá, tudo bem? Vamos começar?')
send_message(message)
payload = location_quick_reply(sender)
send_message(payload)
return 'Ok'
# Resend the location button
if payload == 'do_it_again':
payload = location_quick_reply(sender)
send_message(payload)
if 'attachments' in message:
if 'payload' in message['attachments'][0]:
if 'coordinates' in message['attachments'][0]['payload']:
location = message['attachments'][0]['payload']['coordinates']
latitude = location['lat']
longitude = location['long']
                        _return = send_weather_info(sender, latitude=latitude, longitude=longitude)
if _return == 'error':
message = send_text(sender, get_message('error'))
send_message(message)
payload = location_quick_reply(sender)
send_message(payload)
else:
text = message['text']
for city in CITIES:
if text.lower() in city:
_return = send_weather_info(sender, city_name=text)
if _return == 'error':
message = send_text(sender, get_message('error'))
send_message(message)
# Send location button
payload = location_quick_reply(sender)
send_message(payload)
return 'Ok'
# If text not in city list...
chat_message = search_keyword(text)
if chat_message:
# if found keyword, reply with chat stuff
message = send_text(sender, chat_message)
send_message(message)
else:
message = send_text(sender, get_message('not-a-city'))
send_message(message)
# Send location button
payload = location_quick_reply(sender)
send_message(payload)
except Exception as e:
print(traceback.format_exc())
elif request.method == 'GET':
if request.args.get('hub.verify_token') == os.environ.get('FB_VERIFY_TOKEN'):
return request.args.get('hub.challenge')
return "Wrong Verify Token"
return "Nothing"
if __name__ == '__main__':
app.run(debug=True)
|
beckenkamp/weatherbot
|
index.py
|
Python
|
gpl-3.0
| 7,204
|
# -*- coding: utf-8 -*-
"""
This file is part of coffeedatabase.
coffeedatabase is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
coffeedatabase is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with coffeedatabase.. If not, see <http://www.gnu.org/licenses/>.
"""
# system
import readline
import datetime
import configparser
# coffeedatabase
from lib import cuser
from lib import cpayment
from lib import citem
from lib import cdatabase
from lib import cprice
from lib import cbalance
# Completer Class
# For further reference please see
# https://stackoverflow.com/questions/7821661/how-to-code-autocompletion-in-python
class MyCompleter(object): # Custom completer
def __init__(self, options):
self.options = sorted(options)
def complete(self, text, state):
if state == 0: # on first trigger, build possible matches
            if text: # cache matches (entries that contain the entered text)
self.matches = [s for s in self.options
if text in s]
else: # no text entered, all matches possible
self.matches = self.options[:]
# return match indexed by state
try:
return self.matches[state]
except IndexError:
return None
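# Usage sketch (wired up the same way in getRowByTextname below):
#   completer = MyCompleter(["Alice", "Bob"])
#   readline.set_completer(completer.complete)
#   readline.parse_and_bind('tab: complete')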
class ckeyboard:
def __init__(self):
# First, load the config
config = configparser.ConfigParser()
config.sections()
config.read('config.ini')
if not ('FILENAME' in config) or not ('LIST' in config):
print("Broken config file \"config.ini\".")
            raise ValueError('Broken config file "config.ini".')
self.fileUser = config['FILENAME']['fileUser']
self.filePayment = config['FILENAME']['filePayment']
self.fileItem = config['FILENAME']['fileItem']
self.fileMarks = config['FILENAME']['fileMarks']
self.filePrice = config['FILENAME']['filePrice']
self.inactiveMonths = config['LIST']['inactiveMonths']
self.fileTemplateBalanceMonth = config['FILENAME']['fileTemplateBalanceMonth']
self.fileOutBalanceMonth = config['FILENAME']['fileOutBalanceMonth']
self.fileTemplateListMonth = config['FILENAME']['fileTemplateListMonth']
self.fileOutListMonth = config['FILENAME']['fileOutListMonth']
self.fileOutFolder = config['FILENAME']['fileOutFolder']
if (self.fileUser == "") or \
(self.filePayment == "") or \
(self.fileMarks == "") or \
(self.filePrice == "") or \
(self.fileItem == ""):
print("Broken config file \"config.ini\".")
            raise ValueError('Broken config file "config.ini".')
# create databases, if they do not exist.
database = cdatabase.cdatabase(self.fileUser, self.filePayment, self.fileItem, self.fileMarks, self.filePrice)
self.user = cuser.cuser(self.fileUser, self.inactiveMonths)
self.payment = cpayment.cpayment(self.filePayment, self.user)
self.item = citem.citem(self.fileItem, self.fileMarks, self.user)
self.price = cprice.cprice(self.filePrice, self.item)
self.balance = cbalance.cbalance(self.user, self.payment, self.price, self.item, self.inactiveMonths, self.fileTemplateBalanceMonth, self.fileOutBalanceMonth, self.fileTemplateListMonth, self.fileOutListMonth, self.fileOutFolder)
def inputStandard(self, valueDescription, valueStandard):
""" Displays an input field, nicely formatted. If valueDescription contains \"Name\" or \"name\", autocompletion for the name database will be activated.
valueDescription: List of description for input values.
valueStandard: List of standard values.
"""
if not len(valueDescription) == len(valueStandard):
print("Input vector", valueDescription, "has not the same length as standard value vector", valueStandard)
            raise ValueError("valueDescription and valueStandard must have the same length")
counter = 0
for description in valueDescription:
if description.lower() == "status":
# display special user input field
print("New status:")
print("1 - active")
print("2 - auto")
print("3 - inactive")
textInput = input(str(description) + " [" + valueStandard[counter] + "]: ")
if textInput == "":
textInput = valueStandard[counter]
if textInput == "1" or textInput == "active":
valueStandard[counter] = "active"
elif textInput == "2" or textInput == "auto":
valueStandard[counter] = "auto"
elif textInput == "3" or textInput == "inactive":
valueStandard[counter] = "inactive"
else:
print("The input " + str(textInput) + " was not understood. Please use 1, 2, or 3, active, auto, or inactive.")
                    raise ValueError("Status input not understood: " + str(textInput))
else:
if not valueStandard[counter] == "":
textInput = input(str(description) + " [" + valueStandard[counter] + "]: ")
else:
textInput = input(str(description) + ": ")
if not textInput == "":
valueStandard[counter] = textInput
counter += 1
return valueStandard
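    # Example (sketch): self.inputStandard(["Name", "Mail"], ["", "institut@gwdg.de"])
    # returns the defaults with any typed input applied, e.g. ["Alice", "institut@gwdg.de"].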
def userAdd(self):
""" Adds a user to the user database
"""
userDescription = ["Name", "Mail"]
userStandard = ["", "institut@gwdg.de"]
inputUser = self.inputStandard(userDescription, userStandard)
inputUser.append("active")
self.user.userAdd(inputUser)
# Make a dummy payment
now = datetime.datetime.now()
year = now.strftime("%Y")
month = now.strftime("%m")
day = now.strftime("%d")
user = self.user.getRowByName(inputUser[1], 1)
payment = [user[0], year, month, day, 0]
self.payment.paymentAdd(payment)
# Make dummy marks
mark = [user[0], year, month, day, 0]
for _marks in self.item.marks:
_marks.marksAdd(mark)
return 0
def userChangeInfo(self):
""" Displays user information and allows to change them.
"""
user = self.getRowByTextname(self.user.getNamelist(), self.user)
# remove id
userId = user[0]
del user[0]
print("")
userDescription = ["Name", "Mail", "Status"]
inputUser = self.inputStandard(userDescription, user)
# add user id
inputUser.insert(0, userId)
# save in database
self.user.setUser(inputUser)
return 0
def paymentAdd(self):
""" Adds a payment to the payment database
"""
user = self.getRowByTextname(self.user.getNamelist(), self.user)
# create dates
now = datetime.datetime.now()
year = now.strftime("%Y")
month = now.strftime("%m")
day = now.strftime("%d")
payment1 = [user[0], int(year), int(month), int(day)]
print("")
userDescription = ["Payment"]
payment2 = [""]
inputUser = self.inputStandard(userDescription, payment2)
# fill payment
payment = payment1 + payment2
# save in database
self.payment.paymentAdd(payment)
# print new balance
self.payment.getDataBinMonth()
self.balance.getDataBinMonth()
self.balance.getBalance(user[0])
return 0
def itemAdd(self):
""" Adds a user to the user database
"""
itemDescription = ["Name", "Unit"]
itemStandard = ["Coffee", "per cup"]
inputItem = self.inputStandard(itemDescription, itemStandard)
inputItem.append("active")
self.item.itemAdd(inputItem)
return 0
def itemChangeInfo(self):
""" Displays item information and allows to change them.
"""
item = self.getRowByTextname(self.item.getColumn(1), self.item)
# remove id
itemId = item[0]
del item[0]
print("")
itemDescription = ["Name", "Unit", "Status"]
inputItem = self.inputStandard(itemDescription, item)
# add item id
inputItem.insert(0, itemId)
# save in database
self.item.setItem(inputItem)
return 0
def getRowByTextname(self, array, database):
""" Displays a name field and returns row.
array: Array used for auto completion in text input field.
database: Reference to database class, e.g. self.item, self.user, ...
"""
completer = MyCompleter(array)
readline.set_completer(completer.complete)
readline.parse_and_bind('tab: complete')
print("Search in item database:")
inputText = input("Name: ")
return database.getRowByName(inputText, 1)
def marksAdd(self):
""" Adds marks to the marks database
"""
# create dates
now = datetime.datetime.now()
year = now.strftime("%Y")
month = now.strftime("%m")
day = now.strftime("%d")
# get user
user = self.getRowByTextname(self.user.getNamelist(), self.user)
# get item list
markDescription = []
markDefault = []
for row in self.item.data:
if str(row[3]) == "active":
markDescription.append(row[1])
markDefault.append("0")
# query user input
print("")
inputMark = self.inputStandard(markDescription, markDefault)
# create array for cmark class
markArray = [[0 for x in range(0)] for x in range(0)]
counter = 0
for row in self.item.data:
if str(row[3]) == "active":
markArray.append([user[0], int(year), int(month), int(day), int(inputMark[counter])])
counter += 1
else:
markArray.append([user[0], int(year), int(month), int(day), 0])
# save in database
self.item.marksAdd(markArray)
return 0
def marksAddAll(self):
""" Adds marks to the marks database for all active users
"""
# This list holds all our active and auto active users
userActive = self.user.getIdByStatus("active")
# Check for auto active users in payment and marks
userAuto = self.user.getIdByStatus("auto")
userAutoM = self.payment.getIdDataBinMonthActive(self.inactiveMonths)
for marks in self.item.marks:
userAutoT = marks.getIdDataBinMonthActive(self.inactiveMonths)
userAutoM = userAutoM + userAutoT
userAutoM = list(set(userAutoM))
# which user is active in last n months and auto active?
userAuto = list(set(userAuto).intersection(userAutoM))
# merge both lists
userActive = userActive + userAuto
# remove double entries
userActive = list(set(userActive))
# remove inactive users
userInactive = self.user.getIdByStatus("inactive")
userInactive = list(set(userActive).intersection(userInactive))
userActive = [x for x in userActive if x not in userInactive]
# sort
userActive.sort()
# create dates
now = datetime.datetime.now()
year = int(now.strftime("%Y"))
month = int(now.strftime("%m"))
day = int(now.strftime("%d"))
# This is done usually in the following month, meaning we need to adapt the date to last month
month -= 1
day = 1
if month == 0:
month = 12
for userId in userActive:
user = self.user.getRowById(userId)
print("\n", user[1])
# get item list
markDescription = []
markDefault = []
for row in self.item.data:
if str(row[3]) == "active":
markDescription.append(row[1])
markDefault.append("0")
# query user input
print("")
inputMark = self.inputStandard(markDescription, markDefault)
# create array for cmark class
markArray = [[0 for x in range(0)] for x in range(0)]
counter = 0
for row in self.item.data:
if str(row[3]) == "active":
markArray.append([user[0], int(year), int(month), int(day), int(inputMark[counter])])
counter += 1
else:
markArray.append([user[0], int(year), int(month), int(day), 0])
# save in database
self.item.marksAdd(markArray)
return 0
def priceAdd(self):
""" Adds a price the price database
"""
priceDescription = []
priceStandard = []
itemId = []
priceOld = [[0 for x in range(0)] for x in range(0)]
# acquiere old prices, save as [itemId, price]
for row in self.price.dataBinMonth:
if len(row) >= 2:
for x in range(0, len(row)-1):
if not float(row[-1-x]) == 0:
priceOld.append([row[0], str(row[-1-x])])
break
# create input fields
for row in self.item.data:
priceDescription.append(str(row[1]) + " " + str(row[2]))
priceOldAdded = False
for row1 in priceOld:
if row[0] == row1[0]:
priceStandard.append(row1[1])
priceOldAdded = True
if not priceOldAdded:
priceStandard.append("0")
itemId.append(row[0])
        inputPrice = self.inputStandard(priceDescription, priceStandard)
# create dates
now = datetime.datetime.now()
year = now.strftime("%Y")
month = now.strftime("%m")
day = now.strftime("%d")
counter = 0
for row in itemId:
self.price.priceAdd([row, year, month, day, inputPrice[counter]])
counter += 1
return 0
def priceFill(self):
""" Checks the marks database and matches marks with prices. If a price does not exist, it is requested and added to the price database.
"""
itemId=0
for row in self.item.data:
print ("Checking for item " + str(row[1]))
# Check for marks
self.item.marks[itemId].getDataBinMonth()
marks = self.item.marks[itemId].dataBinMonthHeader
# Check for prices
pricesH = self.price.dataBinMonthHeader
pricesF = self.price.dataBinMonth
prices = []
# Find Id in pricesF
for rowId in pricesF:
if rowId[0] == row[0]:
prices = rowId
del prices[0]
# If Id was not found, we create an empty array
if len(prices) == 0:
if len(pricesF) >= 1:
prices = [0 for x in range(len(pricesF[0])-1)]
# Find missing prices in Header
for mark in marks:
priceFound = False
for price in pricesH:
if mark == price:
priceFound = True
if not priceFound:
pricesH.append(mark)
prices.append(0)
# Find empty prices
priceMissing = [[0 for x in range(0)] for x in range(0)]
counter = 0
for price in prices:
if price == 0:
priceMissing.append(pricesH[counter])
counter += 1
# Request user input for missing prices
princeLatest = "0"
for price in priceMissing:
priceDescription = ["Enter price for " + str(row[1]) + " for year " + str(price[0]) + " and month " + str(price[1])]
priceStandard = [princeLatest]
inputPrice= self.inputStandard(priceDescription, priceStandard)
princeLatest = inputPrice[0]
# save prices
self.price.priceAdd([row[0], price[0], price[1], 1, str(inputPrice[0])])
itemId += 1
return 0
def balanceExportPDF(self):
""" Compute the balance
"""
# create dates
now = datetime.datetime.now()
year = int(now.strftime("%Y"))
month = int(now.strftime("%m"))
dateDescription = ["Year", "Month"]
dateStandard = [str(year), str(month)]
inputDate = self.inputStandard(dateDescription, dateStandard)
# create balance class
self.balance.exportMonthPDF(inputDate[0], inputDate[1], 1)
def listExportPDF(self):
""" Compute the name list
"""
# create dates
now = datetime.datetime.now()
year = int(now.strftime("%Y"))
month = int(now.strftime("%m"))
dateDescription = ["Year", "Month"]
dateStandard = [str(year), str(month)]
inputDate = self.inputStandard(dateDescription, dateStandard)
# create balance class
self.balance.exportMonthListPDF(inputDate[0], inputDate[1], 1)
def balanceCheck(self):
""" Prints a users balance
"""
user = self.getRowByTextname(self.user.getNamelist(), self.user)
# print balance
self.balance.getBalance(user[0])
return 0
|
simonreich/coffeedatabase
|
lib/ckeyboard.py
|
Python
|
gpl-3.0
| 17,891
|
"""
BORIS
Behavioral Observation Research Interactive Software
Copyright 2012-2022 Olivier Friard
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import logging
import os
import pathlib
import re
import sys
from decimal import Decimal as dc
import tablib
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QFileDialog, QInputDialog, QMessageBox)
from boris import dialog
from boris import project_functions
from boris import select_observations
from boris import utilities
from boris.config import *
def create_behavior_binary_table(pj: dict, selected_observations: list, parameters_obs: dict,
time_interval: float) -> dict:
"""
create behavior binary table
Args:
pj (dict): project dictionary
selected_observations (list): list of selected observations
        parameters_obs (dict): dict of parameters
time_interval (float): time interval (in seconds)
Returns:
dict: dictionary of tablib dataset
"""
results_df = {}
state_behavior_codes = [
x for x in utilities.state_behavior_codes(pj[ETHOGRAM]) if x in parameters_obs[SELECTED_BEHAVIORS]
]
point_behavior_codes = [
x for x in utilities.point_behavior_codes(pj[ETHOGRAM]) if x in parameters_obs[SELECTED_BEHAVIORS]
]
if not state_behavior_codes and not point_behavior_codes:
return {"error": True, "msg": "No state events selected"}
for obs_id in selected_observations:
start_time = parameters_obs[START_TIME]
end_time = parameters_obs[END_TIME]
# check observation interval
if parameters_obs["time"] == TIME_FULL_OBS:
max_obs_length, _ = project_functions.observation_length(pj, [obs_id])
start_time = dc("0.000")
end_time = dc(max_obs_length)
if parameters_obs["time"] == TIME_EVENTS:
try:
start_time = dc(pj[OBSERVATIONS][obs_id][EVENTS][0][0])
except Exception:
start_time = dc("0.000")
try:
end_time = dc(pj[OBSERVATIONS][obs_id][EVENTS][-1][0])
except Exception:
max_obs_length, _ = project_functions.observation_length(pj, [obs_id])
end_time = dc(max_obs_length)
if obs_id not in results_df:
results_df[obs_id] = {}
for subject in parameters_obs[SELECTED_SUBJECTS]:
# extract tuple (behavior, modifier)
behav_modif_list = [(idx[2], idx[3]) for idx in pj[OBSERVATIONS][obs_id][EVENTS] if idx[1] == (
subject if subject != NO_FOCAL_SUBJECT else "") and idx[2] in parameters_obs[SELECTED_BEHAVIORS]]
# extract observed subjects NOT USED at the moment
observed_subjects = [event[EVENT_SUBJECT_FIELD_IDX] for event in pj[OBSERVATIONS][obs_id][EVENTS]]
# add selected behavior if not found in (behavior, modifier)
if not parameters_obs[EXCLUDE_BEHAVIORS]:
#for behav in state_behavior_codes:
for behav in parameters_obs[SELECTED_BEHAVIORS]:
if behav not in [x[0] for x in behav_modif_list]:
behav_modif_list.append((behav, ""))
behav_modif_set = set(behav_modif_list)
observed_behav = [(x[0], x[1]) for x in sorted(behav_modif_set)]
if parameters_obs[INCLUDE_MODIFIERS]:
results_df[obs_id][subject] = tablib.Dataset(
headers=["time"] + [f"{x[0]}" + f" ({x[1]})" * (x[1] != "") for x in sorted(behav_modif_set)])
else:
results_df[obs_id][subject] = tablib.Dataset(headers=["time"] + [x[0] for x in sorted(behav_modif_set)])
if subject == NO_FOCAL_SUBJECT:
sel_subject_dict = {"": {SUBJECT_NAME: ""}}
else:
sel_subject_dict = dict([
(idx, pj[SUBJECTS][idx]) for idx in pj[SUBJECTS] if pj[SUBJECTS][idx][SUBJECT_NAME] == subject
])
row_idx = 0
t = start_time
while t <= end_time:
# state events
current_states = utilities.get_current_states_modifiers_by_subject_2(
state_behavior_codes, pj[OBSERVATIONS][obs_id][EVENTS], sel_subject_dict, t)
# point events
current_point = utilities.get_current_points_by_subject(point_behavior_codes,
pj[OBSERVATIONS][obs_id][EVENTS],
sel_subject_dict, t, time_interval)
cols = [float(t)] # time
for behav in observed_behav:
if behav[0] in state_behavior_codes:
cols.append(int(behav in current_states[list(current_states.keys())[0]]))
if behav[0] in point_behavior_codes:
cols.append(current_point[list(current_point.keys())[0]].count(behav))
results_df[obs_id][subject].append(cols)
t += time_interval
row_idx += 1
return results_df
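# Minimal usage sketch for create_behavior_binary_table (assumes a loaded
# project dict `pj` and a parameter dict `params` as built by
# dialog.choose_obs_subj_behav_category; ids below are hypothetical):
#
#   results = create_behavior_binary_table(pj, ["obs 1"], params, dc("1.0"))
#   if "error" not in results:
#       print(results["obs 1"]["subject 1"].export("tsv"))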
def behavior_binary_table(pj: dict):
"""
ask user for parameters for behavior binary table
call create_behavior_binary_table
"""
_, selected_observations = select_observations.select_observations(
pj, MULTIPLE, "Select observations for the behavior binary table")
if not selected_observations:
return
# check if state events are paired
out = ""
not_paired_obs_list = []
for obs_id in selected_observations:
r, msg = project_functions.check_state_events_obs(obs_id, pj[ETHOGRAM], pj[OBSERVATIONS][obs_id])
if not r:
out += f"Observation: <strong>{obs_id}</strong><br>{msg}<br>"
not_paired_obs_list.append(obs_id)
if out:
out = f"The observations with UNPAIRED state events will be removed from the analysis<br><br>{out}"
results = dialog.Results_dialog()
results.setWindowTitle(f"{programName} - Check selected observations")
results.ptText.setReadOnly(True)
results.ptText.appendHtml(out)
results.pbSave.setVisible(False)
results.pbCancel.setVisible(True)
if not results.exec_():
return
selected_observations = [x for x in selected_observations if x not in not_paired_obs_list]
if not selected_observations:
return
max_obs_length, _ = project_functions.observation_length(pj, selected_observations)
if max_obs_length == -1: # media length not available, user choose to not use events
return
parameters = dialog.choose_obs_subj_behav_category(pj,
selected_observations,
maxTime=max_obs_length,
flagShowIncludeModifiers=True,
flagShowExcludeBehaviorsWoEvents=True,
by_category=False)
if not parameters[SELECTED_SUBJECTS] or not parameters[SELECTED_BEHAVIORS]:
QMessageBox.warning(None, programName, "Select subject(s) and behavior(s) to analyze")
return
# ask for time interval
i, ok = QInputDialog.getDouble(None, "Behavior binary table", "Time interval (in seconds):", 1.0, 0.001, 86400, 3)
if not ok:
return
time_interval = utilities.float2decimal(i)
results_df = create_behavior_binary_table(pj, selected_observations, parameters, time_interval)
if "error" in results_df:
QMessageBox.warning(None, programName, results_df["msg"])
return
# save results
if len(selected_observations) == 1:
extended_file_formats = [
"Tab Separated Values (*.tsv)", "Comma Separated Values (*.csv)", "Open Document Spreadsheet ODS (*.ods)",
"Microsoft Excel Spreadsheet XLSX (*.xlsx)", "Legacy Microsoft Excel Spreadsheet XLS (*.xls)",
"HTML (*.html)"
]
file_formats = ["tsv", "csv", "ods", "xlsx", "xls", "html"]
file_name, filter_ = QFileDialog().getSaveFileName(None, "Save results", "", ";;".join(extended_file_formats))
if not file_name:
return
output_format = file_formats[extended_file_formats.index(filter_)]
if pathlib.Path(file_name).suffix != "." + output_format:
file_name = str(pathlib.Path(file_name)) + "." + output_format
# check if file with new extension already exists
if pathlib.Path(file_name).is_file():
if dialog.MessageDialog(programName, f"The file {file_name} already exists.",
[CANCEL, OVERWRITE]) == CANCEL:
return
else:
items = ("Tab Separated Values (*.tsv)", "Comma separated values (*.csv)", "Open Document Spreadsheet (*.ods)",
"Microsoft Excel Spreadsheet XLSX (*.xlsx)", "Legacy Microsoft Excel Spreadsheet XLS (*.xls)",
"HTML (*.html)")
item, ok = QInputDialog.getItem(None, "Save results", "Available formats", items, 0, False)
if not ok:
return
output_format = re.sub(".* \(\*\.", "", item)[:-1]
export_dir = QFileDialog().getExistingDirectory(None,
"Choose a directory to save results",
os.path.expanduser("~"),
options=QFileDialog.ShowDirsOnly)
if not export_dir:
return
mem_command = ""
for obs_id in results_df:
for subject in results_df[obs_id]:
if len(selected_observations) > 1:
file_name_with_subject = str(
pathlib.Path(export_dir) / utilities.safeFileName(obs_id + "_" + subject)) + "." + output_format
else:
file_name_with_subject = str(os.path.splitext(file_name)[0] +
utilities.safeFileName("_" + subject)) + "." + output_format
# check if file with new extension already exists
if mem_command != OVERWRITE_ALL and pathlib.Path(file_name_with_subject).is_file():
if mem_command == "Skip all":
continue
mem_command = dialog.MessageDialog(programName, f"The file {file_name_with_subject} already exists.",
[OVERWRITE, OVERWRITE_ALL, "Skip", "Skip all", CANCEL])
if mem_command == CANCEL:
return
if mem_command in ["Skip", "Skip all"]:
continue
try:
if output_format in ["csv", "tsv", "html"]:
with open(file_name_with_subject, "wb") as f:
f.write(str.encode(results_df[obs_id][subject].export(output_format)))
if output_format in ["ods", "xlsx", "xls"]:
with open(file_name_with_subject, "wb") as f:
f.write(results_df[obs_id][subject].export(output_format))
except Exception:
error_type, error_file_name, error_lineno = utilities.error_info(sys.exc_info())
logging.critical(
f"Error in behavior binary table function: {error_type} {error_file_name} {error_lineno}")
QMessageBox.critical(None, programName, f"Error saving file: {error_type}")
return
|
olivierfriard/BORIS
|
boris/behavior_binary_table.py
|
Python
|
gpl-3.0
| 12,397
|
from builtins import str
from builtins import object
import httplib2
import MySQLdb
import json
import os
import sys
import time
import config
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
class Error(Exception):
"""Custom Exception subclass."""
pass
class YoutubeCaption(object):
OAUTH_SCOPE = "https://gdata.youtube.com"
CAPTIONS_URL_FORMAT = ("http://gdata.youtube.com/feeds/api/videos/%s/" \
"captions?alt=json")
CAPTIONS_CONTENT_TYPE = "application/vnd.youtube.timedtext; charset=UTF-8"
CAPTIONS_LANGUAGE_CODE = "en"
CAPTIONS_TITLE = ""
def __init__(self, developer_key, client_id, client_secret):
self.CLIENT_ID = client_id
self.CLIENT_SECRET = client_secret
self.DEVELOPER_KEY = developer_key
def authenticate(self):
storage = Storage('youtube-oauth.storage')
self.credentials = storage.get()
if self.credentials is None or self.credentials.invalid:
flow = OAuth2WebServerFlow(
client_id = self.CLIENT_ID,
client_secret = self.CLIENT_SECRET,
scope = self.OAUTH_SCOPE,
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) \
Gecko/20100101 Firefox/31.0'
)
self.credentials = run(flow, storage)
def setup_http_request_object(self):
self.headers = {
"GData-Version": "2",
"X-GData-Key": "key=%s" % self.DEVELOPER_KEY
}
self.http = self.credentials.authorize(httplib2.Http())
def upload_translated_captions(self, srt_file_path, video_id):
try:
self.authenticate()
self.setup_http_request_object()
except Exception as e:
raise Error("Error while authenticating: %s" % str(e))
self.headers["Content-Type"] = self.CAPTIONS_CONTENT_TYPE
self.headers["Content-Language"] = self.CAPTIONS_LANGUAGE_CODE
self.headers["Slug"] = self.CAPTIONS_TITLE
srt_file = open(srt_file_path)
self.translated_captions_body = srt_file.read()
url = self.CAPTIONS_URL_FORMAT % video_id
response_headers, body = self.http.request (
url,
"POST",
body = self.translated_captions_body,
headers = self.headers
)
if response_headers["status"] != "201":
return "Received HTTP response %s when uploading captions \
to %s." % (response_headers["status"], url), False
return '%s - %s %s - caption updated' % (video_id, \
self.CAPTIONS_LANGUAGE_CODE, self.CAPTIONS_TITLE), True
def set_caption_language_title(self, language='', title=''):
self.CAPTIONS_LANGUAGE_CODE = language
self.CAPTIONS_TITLE = title
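# Usage sketch (credentials, video id and file path are hypothetical):
#
#   caption = YoutubeCaption(DEVELOPER_KEY, CLIENT_ID, CLIENT_SECRET)
#   caption.set_caption_language_title('en', 'English')
#   message, ok = caption.upload_translated_captions('video-English.srt',
#                                                    'VIDEO_ID')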
if __name__ == "__main__":
caption = YoutubeCaption(config.DEVELOPER_KEY, config.CLIENT_ID, \
config.CLIENT_SECRET)
db = MySQLdb.connect(host = config.DB_HOST, user = config.DB_USER, \
passwd = config.DB_PASS, db = config.DB_NAME)
ldb = MySQLdb.connect(host = config.DB_HOST, user = config.DB_USER, \
passwd = config.DB_PASS, db = 'cron_logs')
db_cursor = db.cursor()
db_cursor.execute("select ctr.id, ctr.language_id, ctr.video, \
ctr.tutorial_detail_id, ctr.video_id, ctd.foss_id, ctd.tutorial from \
creation_tutorialresource ctr INNER JOIN creation_tutorialdetail ctd \
ON ( ctr.tutorial_detail_id = ctd.id ) WHERE ((ctr.status = 1 OR \
ctr.status = 2 ) AND ctr.video_id IS NOT NULL AND ctr.id NOT IN \
(select distinct trid from cron_logs.srt_uploads)) ORDER BY \
ctd.foss_id, ctd.level_id, ctd.order ASC")
rows = db_cursor.fetchall()
ldb = MySQLdb.connect(host = config.DB_HOST, user = config.DB_USER, \
passwd = config.DB_PASS, db = 'cron_logs')
ldb_cursor = ldb.cursor()
for row in rows:
overall_status = 0
db_cursor.execute("select id, name, code from creation_language \
where id = %s", [str(row[1]),])
language = db_cursor.fetchone()
video_title = str(row[6].replace(' ', '-'))
video_path = config.MEDIA_ROOT + 'videos/' + str(row[5]) + '/' + \
str(row[3]) + '/'
english_srt = video_path + video_title + '-English.srt'
status_flag = False
file_missing = False
print('')
print(('FOSS Id:', row[5]))
print(('Tutorial:', row[6]))
print(('Language:', language[1]))
if os.path.isfile(english_srt):
file_missing = False
ldb_cursor.execute("select * from srt_pending_uploads where trid=" \
+ str(row[0]) + " and native_or_english=0")
esrt_row = ldb_cursor.fetchone()
#print 'e------------', esrt_row, '----------'
if esrt_row is None:
caption.set_caption_language_title('en')
message, status_flag = caption.upload_translated_captions(\
english_srt, row[4])
if status_flag:
ldb_cursor.execute("insert into srt_pending_uploads \
(trid,native_or_english) values(%s, 0)", \
[str(row[0]),])
ldb.commit()
overall_status = 1
print(message)
else:
print((row[4], '- English - Already Exist'))
overall_status = 1
else:
file_missing = True
print((row[4], '- English -', 'SRT File Missing'))
if language[1] != 'English':
native_srt = video_path + video_title + '-' + language[1] + '.srt'
if os.path.isfile(native_srt):
ldb_cursor.execute("select * from srt_pending_uploads where \
trid=" + str(row[0]) + " and native_or_english=1")
nsrt_row = ldb_cursor.fetchone()
#print 'n------------', nsrt_row, '----------'
if nsrt_row is None:
file_missing = False
language_title = ''
if language[2] == 'en':
language_title = language[1]
caption.set_caption_language_title(language[2], \
language_title)
message, status_flag = caption.upload_translated_captions(\
native_srt, row[4])
if status_flag:
ldb_cursor.execute("insert into srt_pending_uploads \
(trid,native_or_english) values(%s, 1)", \
[str(row[0]),])
ldb.commit()
print(message)
else:
print((row[4], '-', language[1], '- Already Exist'))
status_flag = True
else:
file_missing = True
print((row[4], '-', language[1], '-', 'SRT File Missing'))
status_flag = False
if status_flag and overall_status:
ldb_cursor.execute("insert into srt_uploads (trid) values(%s)", \
[str(row[0]),])
ldb.commit()
elif file_missing:
continue
else:
time.sleep(1)
time.sleep(1)
|
Spoken-tutorial/spoken-website
|
cron/upload-subtitle.py
|
Python
|
gpl-3.0
| 7,439
|
#!/usr/bin/env python
# File created on 15 Jul 2011
from __future__ import division
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2011-2013, The PICRUSt Project"
__credits__ = ["Jesse Zaneveld","Morgan Langille"]
__license__ = "GPL"
__version__ = "1.0.0-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "zaneveld@gmail.com"
__status__ = "Development"
from os.path import splitext
from string import maketrans
from sys import getrecursionlimit,setrecursionlimit
import re
from cogent.parse.tree import DndParser
from cogent.util.option_parsing import parse_command_line_parameters,\
make_option
from picrust.parse import parse_trait_table,yield_trait_table_fields
from util import PicrustNode
def reformat_tree_and_trait_table(tree,trait_table_lines,trait_to_tree_mapping,\
input_trait_table_delimiter="\t", output_trait_table_delimiter="\t",\
filter_table_by_tree_tips=True, convert_trait_floats_to_ints=False,\
filter_tree_by_table_entries=True,convert_to_bifurcating=False,\
add_branch_length_to_root=False, name_unnamed_nodes=True,\
remove_whitespace_from_labels = True,replace_ambiguous_states=True,\
replace_problematic_label_characters = True,min_branch_length=0.0001,\
verbose=True):
"""Return a full reformatted tree,pruned reformatted tree and set of trait table lines
tree - a PyCogent PhyloNode tree object
trait_table_lines -- the lines of a trait table, where
the rows are organisms and the columns are traits (e.g. gene counts).
trait_id_to_tree_mapping -- a dict keyed by trait table ids, with
values of tree ids. If provided, trait table ids will be mapped to
tree ids
filter_table_by_tree_tips -- if True, remove trait table rows that don't map to ids on the
tree
convert_trait_floats_to_ints -- if True, convert floating point values in trait table cells to integers.
filter_tree_by_table_entries -- if True, save only the subtree that encompasses organisms in the trait table.
(equivalent to removing all tips in the tree that don't map to the trait table)
convert_to_bifurcating -- if True, ensure that the tree is fully bifurcating by resolving polytomies with very short
branches.
add_branch_length_to_root -- if True, ensure that the root node has a minimum branch length
name_unnamed_nodes -- if True, name unnamed nodes in the tree. (Useful for ensuring internal nodes can be
consistently identified in both the reference and pruned trees)
remove_whitespace_from_labels -- if True, replace whitespace in organism labels with underscores
replace_ambiguous_states -- if True, replace various strings representing ambiguous character states,
as well as '-1' or -1 (used by IMG to represent a lack of data) with 0 values.
replace_problematic_table_chars -- if True, replace ':' and ';' in the results with '_', and remove double quotes.
(AncSR methods like ace can't handle these characters in organism labels)
min_branch_length -- set the minimum branch length for all edges in the tree.
This function combines the various reformatting functions in the
library into a catch-all reformatter.
TODO: This function is monolithic, so despite the individual
parts being tested seperately, it probably needs to be broken
down into several modular parts. This would need to be done
with care however, as the order of steps matters quite a bit.
"""
input_tree = tree
#Parse lines to fields once
if trait_table_lines:
if verbose:
print "Parsing trait table...."
header_line,trait_table_fields =\
parse_trait_table(trait_table_lines,delimiter = input_trait_table_delimiter)
else:
if verbose:
print "Found no trait table lines. Setting data and header to empty"
trait_table_fields = []
header_line = ''
# Tree reformatting
if convert_to_bifurcating:
if verbose:
print "Converting tree to bifurcating...."
#maximum recursion depth on large trees
#Try working around this issue with a large
#recursion depth limit
old_recursion_limit = getrecursionlimit()
setrecursionlimit(50000)
input_tree = input_tree.bifurcating() # Required by most ancSR programs
setrecursionlimit(old_recursion_limit)
#input_tree = ensure_root_is_bifurcating(input_tree)
# The below nutty-looking re-filtering step is necessary
# When ensuring the root is bifurcating, internal nodes can
#get moved to the tips so without additional filtering we
#get unannotated tip nodes
#if filter_tree_by_table_entries:
# input_tree = filter_tree_tips_by_presence_in_table(input_tree,\
# trait_table_fields,delimiter=input_trait_table_delimiter)
#Name unnamed nodes
if name_unnamed_nodes:
if verbose:
print "Naming unnamed nodes in the reference tree...."
input_tree=make_internal_nodes_unique(input_tree)
#input_tree.nameUnnamedNodes()
check_node_labels(input_tree,verbose=verbose)
#Paranoid check for missing names:
#if verbose:
# print "Checking that all nodes were named..."
#for i,n in enumerate(input_tree.preorder()):
# if n.Name is None:
# raise ValueError('Node #%s (in tree.preorder()) was not named!'%str(i))
#map trait table ids to tree ids
if trait_to_tree_mapping:
#if verbose:
# print "Validating that trait --> tree mappings match tree ids..."
# good,bad = validate_trait_table_to_tree_mappings(input_tree,\
# trait_to_tree_mapping.values(), verbose = True)
# print "Found %i valid ids." %(len(good))
# print "Found %i invalid ids." %(len(bad))
# #if bad:
# # raise RuntimeError("The following putative tree ids in mapping file aren't actually in the input tree: %s" % bad)
if verbose:
print "Remapping trait table ids to match tree ids...."
trait_table_fields =\
remap_trait_table_organisms(trait_table_fields,trait_to_tree_mapping,\
verbose = verbose)
label_conversion_fns =\
set_label_conversion_fns(remove_whitespace_from_labels=remove_whitespace_from_labels,\
replace_problematic_label_characters=replace_problematic_label_characters)
value_conversion_fns = set_value_conversion_fns(replace_ambiguous_states=replace_ambiguous_states,\
convert_trait_floats_to_ints=convert_trait_floats_to_ints)
#Apply both label and value converters to the trait table
trait_table_fields = convert_trait_table_entries(\
trait_table_fields,\
value_conversion_fns = value_conversion_fns,\
label_conversion_fns = label_conversion_fns)
#We now need to apply any formatting functions to the tree nodes as well, to ensure
#that names are consistent between the two.
if label_conversion_fns:
input_tree = fix_tree_labels(input_tree, label_conversion_fns)
#Then filter the trait table to include only tree tips
if filter_table_by_tree_tips:
if verbose:
print "Filtering trait table ids to include only those that match tree ids...."
trait_table_fields = filter_table_by_presence_in_tree(input_tree,\
trait_table_fields,delimiter=input_trait_table_delimiter)
#if verbose:
# print "Verifying that new trait table ids match tree:"
# print "# of trait_table_lines: %i" %len(trait_table_lines)
# all_tip_ids = [tip.Name for tip in input_tree.iterTips()]
# print "example tree tip ids:",all_tip_ids[0:10]
if filter_tree_by_table_entries:
if verbose:
print "filtering tree tips to match entries in trait table...."
input_tree = filter_tree_tips_by_presence_in_table(input_tree,\
trait_table_fields,delimiter=input_trait_table_delimiter,\
verbose=verbose)
if min_branch_length:
if verbose:
print "Setting a min branch length of %f throughout tree...." \
% min_branch_length
input_tree = set_min_branch_length(input_tree,min_length = min_branch_length)
if add_branch_length_to_root:
        if verbose:
print "Adding a min branch length of %f to the root node...." \
% min_branch_length
        # NOTE: the boolean keyword argument shadows the module-level
        # add_branch_length_to_root() function, so fetch the function from
        # the module namespace before calling it.
        input_tree = globals()['add_branch_length_to_root'](input_tree,\
          root_name=input_tree.Name, root_length=min_branch_length)
if verbose:
print "Performing a final round of tree pruning to remove internal nodes with only one child...."
input_tree.prune()
#Format resulting trait table lines
result_trait_table_lines = [header_line]
result_trait_table_lines.extend([output_trait_table_delimiter.join(f) for f in trait_table_fields])
if verbose:
print "Final reprocessing of trait table lines to remove trailing whitespace..."
result_trait_table_lines =\
[line.strip() for line in result_trait_table_lines if line.strip()]
if verbose:
print "Done reformatting tree and trait table"
return input_tree, result_trait_table_lines
def check_node_labels(input_tree,verbose=False):
"""Check that all nodes are named!"""
if verbose:
print "Checking that all nodes were named..."
for i,n in enumerate(input_tree.preorder()):
        if verbose:
            print i,n.Name, n.NameLoaded
if n.Name is None:
err_text = 'WARNING: Node #%s (in tree.preorder()) was not named!. Node properties: %s'%(str(i),str(dir(n)))
print err_text
def set_label_conversion_fns(remove_whitespace_from_labels=True,\
replace_problematic_label_characters=True,verbose=False):
"""Return a list of functions for formatting tree node or trait table labels"""
#Set the functions that will be applied to trait table labels
label_conversion_fns = []
if remove_whitespace_from_labels:
if verbose:
print "Removing whitespace from trait table organism labels..."
label_conversion_fns.append(remove_spaces)
if replace_problematic_label_characters:
        # Replace problematic label characters with underscores
        replacement_dict = {":":"_",";":"_"}
if verbose:
print "Replacing problematic labels in organism labels:"
for k,v in replacement_dict.items():
print k,'-->',v
chars_to_delete = """'"'"""
replace_problematic_chars_fn =\
make_char_translation_fn(replacement_dict,chars_to_delete)
label_conversion_fns.append(replace_problematic_chars_fn)
return label_conversion_fns
def set_value_conversion_fns(replace_ambiguous_states=True,\
convert_trait_floats_to_ints=False,verbose=False):
"""Return a list of value conversion functions for trait table values
replace_ambiguous_states -- if True, replace values of -,
-1,'-1','NULL' or None to 0
convert_trait_floats_to_ints -- if True convert floats to ints
verbose -- print verbose output describing the conversion fns
"""
#Set the functions that will be applied to trait table values
value_conversion_fns = []
if replace_ambiguous_states:
# Replace ambiguous characters with 0's
replacement_dict ={'-':0,'-1':0,-1:0,'NULL':0,None:0}
if verbose:
print "Replacing ambiguous characters:"
for k,v in replacement_dict.items():
print k,'-->',v
replace_ambig_fn = make_translate_conversion_fn(replacement_dict)
value_conversion_fns.append(replace_ambig_fn)
if convert_trait_floats_to_ints:
value_conversion_fns.append(lambda x: str(int(float(x))))
if verbose:
print "Converting floating point trait table values to integers...."
return value_conversion_fns
def fix_tree_labels(tree,label_conversion_fns,verbose=False):
"""Fix tree labels by removing problematic characters"""
if verbose:
print "reformatting tree node names..."
tree = format_tree_node_names(tree,label_conversion_fns)
#print "Number of tree tips with single quotes:",len([t.Name for t in tree if "'" in t.Name])
return tree
def make_internal_nodes_unique(tree,base_name='internal_node_%i'):
""" Removes names that are not unique for internal nodes.
First occurence of non-unique node is kept and subsequence ones are set to None"""
#make a list of the names that are already in the tree
names_in_use = set()
for i,node in enumerate(tree.preorder(include_self=True)):
if node.Name is not None:
if node.Name in names_in_use:
node.Name=None
else:
names_in_use.add(node.Name)
if node.Name is None:
while node.Name is None:
#Find a unique name by adding integers
proposed_name = base_name % i
if proposed_name not in names_in_use:
node.Name = proposed_name
names_in_use.add(proposed_name)
break
else:
i += 1
#Set this so that the PhyloNode *actually* outputs the Name
node.NameLoaded = True
return tree
def format_tree_node_names(tree,label_formatting_fns=[]):
"""Return tree with node names formatted using specified fns
tree -- a PyCogent PhyloNode tree object
formatting_fns -- a list of formatting functions that are to
be called on each node name in the tree, and which each return
a new node name.
"""
for n in tree.preorder():
if n.Name is None:
continue
new_node_name = n.Name
for formatting_fn in label_formatting_fns:
new_node_name = formatting_fn(new_node_name)
n.Name = new_node_name
return tree
def nexus_lines_from_tree(tree):
"""Return NEXUS formatted lines from a PyCogent PhyloNode tree"""
lines = ["#NEXUS"]
lines.extend(make_nexus_trees_block(tree))
return lines
def add_branch_length_to_root(tree, root_name ="root",root_length=0.0001):
"""Add branch length to the root of a tree if it's shorter than root_length
tree -- A PyCogent PhyloNode object
root_name -- the name of the root node
root_length -- the desired minimum root length
This is required by some programs such as BayesTraits"""
root = tree.getNodeMatchingName(root_name)
root.Length = max(root.Length,root_length)
return tree
def set_min_branch_length(tree,min_length= 0.0001):
"""Return tree modified so that all branchlengths are >= min_length.
tree -- a PyCogent PhyloNode object"""
for node in tree.preorder():
if not node.Parent:
continue
node.Length = max(node.Length,min_length)
return tree
def make_nexus_trees_block(tree):
"""Generate a NEXUS format 'trees' block for a given tree
WARNING: Removes names from internal nodes, as these cause problems
downstream
"""
# First generate the mappings for the NEXUS translate command
trees_block_template =\
["begin trees;",\
"\ttranslate"]
name_mappings = {}
line = None
for i,node in enumerate(tree.iterTips()):
name_mappings[node.Name] = i
if line:
trees_block_template.append(line)
line = "\t\t%i %s," %(i,node.Name)
# The last line needs a semicolon rather than a comma
line = "\t\t%i %s;" %(i,node.Name)
trees_block_template.append(line)
# Reformat tree newick such that names match NEXUS translation table
for name_to_fix in name_mappings.keys():
node_to_rename = tree.getNodeMatchingName(name_to_fix)
node_to_rename.Name=name_mappings[name_to_fix]
for nonTipNode in tree.iterNontips():
nonTipNode.Name=''
tree_newick = tree.getNewick(with_distances=True)
#for name_to_fix in name_mappings.keys():
# tree_newick = tree_newick.replace(name_to_fix+":",str(name_mappings[name_to_fix])+":")
#for nonTipNode in tree.iterNontips():
# tree_newick = tree_newick.replace(nonTipNode.Name+":","")
#tree_newick = tree_newick.replace(root_name,"")
tree_template = "\t\ttree %s = %s" # tree name then newick string
line = tree_template % ("PyCogent_tree",tree_newick)
trees_block_template.append(line)
trees_block_template.append("end;")
return trees_block_template
def validate_trait_table_to_tree_mappings(tree,trait_table_ids,verbose=True):
"""Report whether tree ids are even in mapping file"""
good = []
bad = []
nodes = [n.Name for n in tree.iterTips()]
for tt_id in trait_table_ids:
if tt_id in nodes:
good.append(tt_id)
else:
bad.append(tt_id)
if verbose:
print "Of %i ids, %i were OK (mapped to tree)" %(len(trait_table_ids),len(good))
print "Example good ids",good[0:min(len(good),10)]
print "Example bad ids",bad[0:min(len(bad),10)]
print "Example tip ids",nodes[0:min(len(nodes),10)]
return good,bad
def filter_table_by_presence_in_tree(tree,trait_table_fields,name_field_index = 0,delimiter="\t"):
"""yield lines of a trait table lacking organisms missing from the tree"""
tree_tips = [str(node.Name.strip()) for node in tree.preorder()]
#print tree_tips
result_fields = []
for fields in trait_table_fields:
curr_name = fields[name_field_index].strip()
if curr_name not in tree_tips:
#print curr_name,"could not be found in tree nodes"
#print curr_name in tree_tips
#try:
# print int(curr_name) in tree_tips
#except:
# pass
#print curr_name.strip() in tree_tips
continue
result_fields.append(fields)
return result_fields
def make_translate_conversion_fn(translation_dict):
"""Return a new function that replaces values in input values with output_value
translation_dict -- a dict that maps inputs that should be translated to
their appropriate output
"""
def translate_conversion_fn(trait_value_field):
# Return translation, or the original value if no translation
# is available
try:
trait_value_field = trait_value_field.strip()
except AttributeError:
trait_value_field = str(trait_value_field).strip()
result = translation_dict.get(trait_value_field,trait_value_field)
#print trait_value_field
#print translation_dict.keys()
if result in translation_dict.keys():
raise RuntimeError("failed to translate value: %s" % result)
return str(result)
return translate_conversion_fn
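# Example with a hypothetical mapping:
#   fn = make_translate_conversion_fn({'-1': 0, 'NULL': 0})
#   fn('-1')  -> '0'
#   fn('1.5') -> '1.5'  (values without a translation pass through unchanged)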
def make_char_translation_fn(translation_dict,deletion_chars=''):
"""Return a new function that replaces values in input values with output_value
translation_dict -- a dict that maps inputs that should be translated to
their appropriate output
"""
def translate_conversion_fn(trait_value_field):
# Return translation, or the original value if no translation
# is available
trait_value_field = str(trait_value_field).strip()
from_chars = ''
to_chars = ''
for k,v in translation_dict.items():
from_chars += k
to_chars += v
translation_table = maketrans(from_chars,to_chars)
#print trait_value_field
#print translation_dict.keys()
result = trait_value_field.translate(translation_table,deletion_chars)
if result in translation_dict.keys():
raise RuntimeError("failed to translate value: %s" % result)
return str(result)
return translate_conversion_fn
def remove_spaces(trait_label_field):
"""A conversion function that replaces spaces with underscores in a label
"""
label = str(trait_label_field)
fields = trait_label_field.lstrip().strip().split()
return "_".join(fields)
def convert_trait_table_entries(trait_table_fields,\
label_conversion_fns=[str],value_conversion_fns = [float]):
"""Convert trait values by running conversion_fns on labels and values
trait_table_fields -- list of strings (from a trait table line)
the first field is assumed to be an organism name, and so isn't
formatted.
label_conversion_fns -- a list of functions to be run on each
organism name label (in the order they should be run). Each
function should need only a single entry as input, and output
the resulting label
value_conversion_fns -- another list of functions, but for
trait values. Again these will be run in order on each table
value.
"""
name_field_index = 0
#print "Value conversion fns:",[f.__name__ for f in value_conversion_fns]
#print "label_conversion_fns:",[f.__name__ for f in label_conversion_fns]
for fields in trait_table_fields:
new_fields = []
for i,field in enumerate(fields):
if i != name_field_index:
converters_to_use = value_conversion_fns
else:
converters_to_use = label_conversion_fns
#Run appropriate converters on this field
new_val = field
for curr_conv_fn in converters_to_use:
new_val = str(curr_conv_fn(new_val))
new_fields.append(new_val)
yield new_fields
def ensure_root_is_bifurcating(tree,root_name='root',verbose=False):
"""Remove child node of root if it is a single child"""
root_node = tree.getNodeMatchingName(root_name)
if len(root_node.Children) == 1:
if verbose:
print "Rerooting to avoid monotomy at root"
tree = tree.rootedAt(root_node.Children[0].Name)
#tree.remove(root_node)
tree.prune()
return tree
def filter_tree_tips_by_presence_in_table(tree,trait_table_fields,name_field_index = 0,\
delimiter="\t",verbose=True):
"""yield a tree lacking organisms missing from the trait table
trait_table_fields -- a list of lists, containing the results of parsing the data
lines of the trait table. Each set of fields in the list should contain the organism name
at index 0, and data values for the various traits at other positions
"""
org_ids_in_trait_table = []
new_tree = tree.deepcopy()
for fields in trait_table_fields:
curr_org = fields[name_field_index].strip()
org_ids_in_trait_table.append(curr_org)
# Build up a list of tips to prune
tips_to_prune = []
tips_not_to_prune = []
n_tips_not_to_prune = 0
for tip in tree.iterTips():
if tip.Name.strip() not in org_ids_in_trait_table:
tips_to_prune.append(tip.Name)
else:
n_tips_not_to_prune += 1
tips_not_to_prune.append(tip.Name)
if verbose and tips_to_prune:
print "Found %i tips to prune." %(len(tips_to_prune))
print "Example pruned tree tip names:",tips_to_prune[0:min(len(tips_to_prune),10)]
print "Example valid org ids:",org_ids_in_trait_table[0:min(len(org_ids_in_trait_table),10)]
if not n_tips_not_to_prune:
raise RuntimeError(\
"filter_tree_tips_by_presence_in_table: operation would remove all tips. Is this due to a formatting error in inputs?")
if verbose:
print "%i of %i tips will be removed (leaving %i)" %(len(tips_to_prune),\
n_tips_not_to_prune + len(tips_to_prune), n_tips_not_to_prune)
print "Example tips that will be removed (first 10):\n\n%s" % \
tips_to_prune[0:min(len(tips_to_prune),10)]
new_tree = get_sub_tree(tree,tips_not_to_prune)
return new_tree
def get_sub_tree(tree,tips_not_to_prune):
"""Get sub tree, modifying recursion limit if necessary"""
try:
new_tree = tree.getSubTree(tips_not_to_prune)
except RuntimeError:
#NOTE: getSubTree will hit
#maximum recursion depth on large trees
#Try working around this issue with a large
#recursion depth limit
old_recursion_limit = getrecursionlimit()
setrecursionlimit(50000)
new_tree = tree.getSubTree(tips_not_to_prune)
setrecursionlimit(old_recursion_limit)
return new_tree
def print_node_summary_table(input_tree):
"""Print a summary of the name,children,length, and parents of each node"""
for node in input_tree.postorder():
if node.Parent:
parent_name = node.Parent.Name
else:
parent_name = None
yield "\t".join(map(str,[node.Name,len(node.Children),node.Length,parent_name]))
def add_to_filename(filename,new_suffix,delimiter="_"):
"""Add to a filename, preserving the extension"""
filename, ext = splitext(filename)
new_filename = delimiter.join([filename,new_suffix])
return "".join([new_filename,ext])
def make_id_mapping_dict(tree_to_trait_mappings):
"""Generates trait_to_tree mapping dictionary from a list of mapping tuples
mappings -- in the format tree_id, trait_id
"""
trait_to_tree_mapping_dict = {}
for tree_id,trait_id in tree_to_trait_mappings:
trait_to_tree_mapping_dict[trait_id] = tree_id
return trait_to_tree_mapping_dict
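# Example: make_id_mapping_dict([("tree_1", "trait_1"), ("tree_2", "trait_2")])
#          returns {"trait_1": "tree_1", "trait_2": "tree_2"}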
def parse_id_mapping_file(file_lines,delimiter="\t"):
"""Parse two-column id mapping file, returning a generator of fields"""
for line in file_lines:
yield line.strip().split(delimiter)
def remap_trait_table_organisms(trait_table_fields,trait_to_tree_mapping_dict,verbose=False):
"""Yield trait table fields with organism ids substituted using the mapping dict
An iterator containing lists for each trait. The first field in each list
should be the organism id, and the rest should be trait values.
"""
remapped_fields = []
bad_ids = []
default_total = 0
#if verbose:
# print trait_to_tree_mapping_dict
# print sorted(list(set(trait_to_tree_mapping_dict.keys())))
for fields in trait_table_fields:
try:
fields[0] = trait_to_tree_mapping_dict[fields[0]]
except KeyError:
bad_ids.append(fields[0])
continue
remapped_fields.append(fields)
if verbose and bad_ids:
print "%i of %i trait table ids could not be mapped to tree" %(len(bad_ids),len(remapped_fields))
print "Example trait table ids that could not be mapped to tree:" %(bad_ids[:min(len(bad_ids),10)])
return remapped_fields
def load_picrust_tree(tree_fp, verbose=False):
"""Safely load a tree for picrust"""
    #PicrustNode seems to run into very slow/memory intensive performance...
#tree = DndParser(open(opts.input_tree),constructor=PicrustNode)
tree = DndParser(open(tree_fp),constructor=PicrustNode)
label_conversion_fns = set_label_conversion_fns(verbose=verbose)
tree = fix_tree_labels(tree,label_conversion_fns)
return tree
def load_tab_delimited_trait_table(trait_table_fp,verbose=False):
"""Load a tab delimited trait table for picrust"""
input_trait_table = open(trait_table_fp,"U")
if verbose:
print "Parsing trait table..."
#Find which taxa are to be used in tests
#(by default trait table taxa)
trait_table_header,trait_table_fields = \
parse_trait_table(input_trait_table)
label_conversion_fns = set_label_conversion_fns(verbose=verbose)
trait_table_fields = convert_trait_table_entries(trait_table_fields,\
value_conversion_fns = [],\
label_conversion_fns = label_conversion_fns)
trait_table_fields = [t for t in trait_table_fields]
if verbose:
print "Number of trait table fields with single quotes:",\
len([t for t in trait_table_fields if "'" in t[0]])
return trait_table_header,trait_table_fields
|
wasade/picrust
|
picrust/format_tree_and_trait_table.py
|
Python
|
gpl-3.0
| 28,426
|
# -*- coding: utf-8 -*-
#
# python-gnupg documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 5 22:38:47 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import psutil
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('./../'))
sys.path.insert(0, os.path.abspath('.'))
# -- Autodoc settings ----------------------------------------------------------
## trying to set this somewhere...
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'show-inheritance', 'undoc-members', 'show-hidden']
autoclass_content = 'both'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.1'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.doctest',
'sphinxcontrib.fulltoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_static']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gnupg'
copyright = u'2013-2014, Isis Agora Lovecruft'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from pretty_bad_protocol import gnupg
version = gnupg.__version__
# The full version, including alpha/beta/rc tags.
release = gnupg.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%d %B %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
#html_theme = 'scrolls'
#html_theme = 'traditional'
#html_theme = 'nature'
#html_theme = 'pyramid'
html_theme = 'agogo'
#html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# 'stickysidebar': 'true',
# 'rightsidebar':'true',
'nosidebar': 'false',
# 'full_logo': 'false'
'sidebarwidth': '300'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_static']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "gnupg: Python Module Documentation"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%A, %d %B %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'gnupgdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'python-gnupg.tex', u'python-gnupg Documentation',
u'Isis Agora Lovecruft', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gnupg Python Module Docs', u'gnupg Python Module Documentation',
[u'Isis Agora Lovecruft'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'python-gnupg', u'python-gnupg Documentation',
u'Isis Agora Lovecruft', 'python-gnupg', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'python-gnupg'
epub_author = u'Isis Agora Lovecruft'
epub_publisher = u'Isis Agora Lovecruft'
epub_copyright = u'2013, Isis Agora Lovecruft'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
isislovecruft/python-gnupg
|
docs/conf.py
|
Python
|
gpl-3.0
| 10,098
|
import cv2
import numpy as np
import os
from vilay.core.Descriptor import MediaTime, Shape
from vilay.detectors.IDetector import IDetector
from vilay.core.DescriptionScheme import DescriptionScheme
class FaceDetector(IDetector):
def getName(self):
return "Face Detector"
def initialize(self):
# define haar-detector file
print os.getcwd() + '/vilay/detectors/FaceDetector/haarcascade_frontalface_default.xml'
self.cascade = cv2.CascadeClassifier(os.getcwd() + '/vilay/detectors/FaceDetector/haarcascade_frontalface_default.xml')
def detect(self, mediaTimes, tgtDS, film, rootDS, mainGUI):
for mediaTime in mediaTimes:
for frameIdx in range(mediaTime.startTime, mediaTime.startTime + mediaTime.duration):
actFrame = film.getFrame(frameIdx)
# preprocessing
actFrame = cv2.cvtColor(actFrame, cv2.cv.CV_BGR2GRAY)
actFrame = cv2.equalizeHist(actFrame)
# detect faces
faces = self.cascade.detectMultiScale(actFrame, 1.2, 3, 0, (5,5))
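                # positional args above: scaleFactor=1.2, minNeighbors=3,
                # flags=0, minSize=(5,5) (OpenCV 2.x detectMultiScale API)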
# create ds and add time and shape descriptor
for faceIdx in range(len(faces)):
[x,y,width,height] = faces[faceIdx,:]
ds = DescriptionScheme('RTI', 'Face Detector')
region = Shape('Face Detector','rect', np.array([[x, y], [x + width, y + height]]))
mediaTime = MediaTime('Face Detector', frameIdx, 1)
tgtDS.addDescriptionScheme(ds)
ds.addDescriptor(region)
ds.addDescriptor(mediaTime)
|
dakot/vilay-detect
|
vilay/detectors/FaceDetector.py
|
Python
|
gpl-3.0
| 1,782
|
#!/usr/bin/python
########################################################################## RAD4SNPs:##############################################################################
# A set of Python scripts to select and validate independent SNPs markers from a list of read files #
##################################################################################################################################################################
# MAIN PROGRAM
# Authors: G.LASSALLE (gilles.lassalle@inra.fr) & C.DELORD (chrystelle.delord@inra.fr)
# Last update: AUGUST 2017
#################### PRE-CONDITIONS
#- [-i] Working directory where to store results of the pipeline for the focal species X
#- [-d] exact name of MySQL database where denovo_map.pl Stacks data are available for the focal species X
#- [-i1] single-end reads (reads 1) for focal species X duplicate 1
#- [-i2] single-end reads (reads 1) for focal species X duplicate 2
#- [-i3] paired-end reads (reads 2) for focal species X duplicate 1
#- [-i4] paired-end reads (reads 2) for focal species X duplicate 2
#- BWA and SAMtools available
#- Connexion to the Stacks MySQL database available: databases of Stacks 'denovo_map output' for each species.
###############################################################################
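# Example invocation (script name, paths and ids below are hypothetical):
#   python RAD4SNPs_main.py -i /data/spX -d stacks_spX -c SPX \
#       -i1 spX_rep1.R1.fq.gz -i2 spX_rep2.R1.fq.gz \
#       -i3 spX_rep1.R2.fq.gz -i4 spX_rep2.R2.fq.gz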
import argparse
import os
import sys
import MySQLdb
###############################################################################
parser = argparse.ArgumentParser()
parser.add_argument('-i', action='store', dest='InputDir', help='Working Directory')
parser.add_argument('-d', action='store', dest='database', help='Stacks database')
parser.add_argument('-c', action='store', dest='CodeSp', help='ID of the species')
parser.add_argument('-i1', action='store', dest='R11', help='First R1 file')
parser.add_argument('-i2', action='store', dest='R12', help='Second R1 file')
parser.add_argument('-i3', action='store', dest='R21', help='First R2 file')
parser.add_argument('-i4', action='store', dest='R22', help='Second R2 file')
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
results = parser.parse_args()
print 'input directory =', results.InputDir
##############################################################################
# Arguments testing
##############################################################################
if results.InputDir:
if os.path.isdir(results.InputDir):
print "Working directory is valid."
else :
print "Caution: working directory is invalid, please ckeck [-i]."
sys.exit()
else :
print "Please insert path for working directory [-i]. End of program."
sys.exit()
##############################################################################
if results.database:
db = MySQLdb.connect(host="", # your host, usually localhost
user="", # your username
passwd="", # your password
db=results.database) # name of the database
cur1= db.cursor() # connexion
print "Currently working on MySQL database: "+str(results.database)
else:
print "Incorrect ID for database: database not found, please check [-d]"
sys.exit()
###############################################################################
#
if results.R11:
if os.path.isfile(results.R11):
print "First file of single-end reads: found."
else :
print "Path to single-end reads data is not a file: please check out [-i1]."
sys.exit()
else :
print "Please insert path to single-end read files [-i1]. End of program."
sys.exit()
#
if results.R12:
if os.path.isfile(results.R12):
print "Second file of single-end reads: found."
else :
print "Path to single-end reads data is not a file: please check out [-i2]."
sys.exit()
else :
print "Please insert path to single-end read files [-2]. End of program."
sys.exit()
#
if results.R21:
if os.path.isfile(results.R21):
print "First file of paired-end reads: found."
else :
print "Path to paired-end reads data is not a file: please check out [-i3]."
sys.exit()
else :
print "Please insert path to paired-end read files [-i3]. End of program."
sys.exit()
#
if results.R22:
if os.path.isfile(results.R22):
print "Second file of paired-end reads: found."
else :
print "Path to paired-end reads data is not a file: please check out [-i4]."
sys.exit()
else :
print "Please insert path to paired-end read files [-i4]. End of program."
sys.exit()
###############################################################################
if results.CodeSp:
    CodeEspece=str(results.CodeSp)
    # make sure the species code ends with an underscore separator
    if CodeEspece[-1:]!="_":
        CodeEspece=CodeEspece+"_"
else:
CodeEspece="std_"
###############################################################################
WorkDir=os.path.abspath(results.InputDir) # Current working directory
FastaCatalog=str(WorkDir)+"/"+str(results.CodeSp)+"Catalog.fasta" # Formatting name of candidates fasta file -output of MySQL filtering
###############################################################################
# Main program
###############################################################################
if os.path.isfile("/usr/bin/bwa"):
print "BWA program is found."
else :
print "Cannot find BWA: please check out pipeline requirements."
sys.exit()
###samtools
if os.path.isfile("/usr/bin/samtools"):
print "SAMtools program is found."
else :
print "Cannot find SAMtools: please check out pipeline requirements."
sys.exit()
#####################################################
# Working directory writable
filepath = results.InputDir+'/file.txt'
try:
filehandle = open( filepath, 'w' )
except IOError:
    sys.exit( 'Working directory is not accessible: ' + filepath )
###############################################################################
# Pipeline commands:
###############################################################################
#################################### FIRST FILTERING ##########################
print os.getcwd()
commandeExtractFasta="./RAD4SNPs_SQL2Fasta.py -o "+str(FastaCatalog)+" -d "+str(results.database)+" -c "+str(CodeEspece)
print "Extraction du fichier fasta"
print commandeExtractFasta
os.system(commandeExtractFasta)
############################## Fusion of single-end reads #####################
if results.R11:
if results.R12:
commandFusionR1="cat "+str(results.R11)+" "+str(results.R12)+" > "+str(WorkDir)+"/allR1.fq.gz"
else :
commandFusionR1="cp "+str(results.R11)+" "+str(WorkDir)+"/allR1.fq.gz"
#############################fin de fusion
############################## Fusion of paired-end reads #####################
if results.R21:
if results.R22:
commandFusionR2="cat "+str(results.R21)+" "+str(results.R22)+" > "+str(WorkDir)+"/allR2.fq.gz"
else :
commandFusionR2="cp "+str(results.R21)+" "+str(WorkDir)+"/allR2.fq.gz"
#################################### SECOND FILTERING (1) #####################
command1="bwa index "+str(FastaCatalog) # Indexing
command2="bwa mem -a -M "+str(FastaCatalog)+" "+str(WorkDir)+"/allR1.fq.gz > "+str(WorkDir)+"/PremierAlign.sam" # SE reads alignment
command3="samtools view -Sb "+str(WorkDir)+"/PremierAlign.sam | samtools sort - "+str(WorkDir)+"/PremierAlign1Sorted" # Conversion to bam file
command4="samtools view -F4 "+str(WorkDir)+"/PremierAlign1Sorted.bam > "+str(WorkDir)+"/PremierAlign1Sorted-F4.sam" # Elimination of unmapped SE reads
print "SE reads merging: "+str(commandFusionR1)
os.system(commandFusionR1)
print "PE reads merging: "+str(commandFusionR2)
os.system(commandFusionR2)
print "BWA indexing: "+str(command1)
os.system(command1)
print "Alignment: "+str(command2)
os.system(command2)
print "Conversion to bam file: "+str(command3)
os.system(command3)
print "Elimination of unmapped SE reads: "+str(command4)
os.system(command4)
print " ************************************************************************"
print " Second filtering (1) with default parameters "
print " ************************************************************************"
print os.getcwd()
commande5="./RAD4SNPs_SamFilter.py -i "+str(WorkDir)+"/PremierAlign1Sorted-F4.sam"
os.system(commande5)
Candidatfasta1=str(WorkDir)+"/PremierAlign1Sorted-F4R1Filtered.fa" # Obtain the incomplete SE-validated fasta file
if os.path.isfile(Candidatfasta1):
print "SE-validated fasta file about to be completed. Re-aligning to complete second filtering."
else :
sys.exit( '****ERROR**** A problem occurred. Please check out alignment outputs.')
#################################### SECOND FILTERING (2) #####################
command21="bwa index "+str(Candidatfasta1)
command22="bwa mem -a -M "+str(Candidatfasta1)+" "+str(WorkDir)+"/allR1.fq.gz > "+str(WorkDir)+"/SecondAlign.sam"
command23="samtools view -Sb "+str(WorkDir)+"/SecondAlign.sam | samtools sort - "+str(WorkDir)+"/SecondAlign1Sorted"
command25="samtools index "+str(WorkDir)+"/SecondAlign1Sorted.bam"
command25bis="samtools faidx "+str(Candidatfasta1)
command26="samtools mpileup -d 1000 -O --ff 4 -f "+str(Candidatfasta1) +" "+ str(WorkDir)+"/SecondAlign1Sorted.bam"+" > "+str(WorkDir)+"/CandidatsR1.pileup"
print "BWA indexing: "+str(command21)
os.system(command21)
print "Alignment: "+str(command22)
os.system(command22)
print "Conversion to bam file: "+str(command23)
os.system(command23)
print "Indexing of bam file: "+str(command25)
os.system(command25)
print "Indexing for pileup file: "+str(command25bis)
os.system(command25bis)
print "Construction of SE pileup file: "+str(command26)
os.system(command26)
print " ************************************************************************"
print " Second filtering (2) with default parameters "
print " ************************************************************************"
print os.getcwd()
command27="./RAD4SNPs_PileupFilter.py -i "+str(WorkDir)+"/CandidatsR1.pileup"
print "End of second filtering: elimination of flanking variants: "+str(command27)
os.system(command27)
command28="./RAD4SNPs_FinalSQLExtract.py -i"+str(WorkDir)+"/CandidatsR1NoMulti.txt -d "+str(results.database)+" -c "+str(CodeEspece)+" > "+str(WorkDir)+"/CandidatFin.fasta"
print "Complete SE-validated fasta file: "+str(command28)
os.system(command28)
command28bis="sed -i '1d' "+str(WorkDir)+"/CandidatFin.fasta"
os.system(command28bis)
#################################### THIRD FILTERING ##########################
CandidatFin=str(WorkDir)+"/CandidatFin.fasta"
if os.path.isfile(CandidatFin):
print "SE-validated fasta file is completed. Re-aligning to perform third filtering."
else :
sys.exit( '****ERROR**** A problem occurred. Please check out alignment and/or pileup outputs.')
command29="bwa index "+str(CandidatFin)
command30="bwa mem -a -M "+str(CandidatFin)+" "+str(WorkDir)+"/allR2.fq.gz > "+str(WorkDir)+"/ThirdAlign.sam"
command31="samtools view -Sb "+str(WorkDir)+"/ThirdAlign.sam | samtools sort - "+str(WorkDir)+"/ThirdAlign2Sorted"
command32="samtools index "+str(WorkDir)+"/ThirdAlign2Sorted.bam"
command32bis="samtools faidx "+str(CandidatFin)
command33="samtools mpileup -d 1000 -O --ff 4 -f "+str(CandidatFin)+" "+str(WorkDir)+"/ThirdAlign2Sorted.bam"+" > "+str(WorkDir)+"/Candidats3.pileup"
print "BWA indexing: "+str(command29)
os.system(command29)
print "Alignment: "+str(command30)
os.system(command30)
print "Conversion to bam file: "+str(command31)
os.system(command31)
print "Indexing of bam file: "+str(command32)
os.system(command32)
print "Indexing for pileup file: "+str(command32bis)
os.system(command32bis)
print "Construction of PE pileup file: "+str(command33)
os.system(command33)
print " ************************************************************************"
print " Third filtering with default parameters "
print " ************************************************************************"
print os.getcwd()
command34="./RAD4SNPs_PileupFilter.py -i "+str(WorkDir)+"/Candidats3.pileup"
print "End of third filtering: elimination of flanking variants: "+str(command34)
os.system(command34)
command35="./RAD4SNPs_FinalSQLExtract.py -i"+str(WorkDir)+"/CandidatsR2NoMulti.txt -d "+str(results.database)+" -c "+str(CodeEspece)+" > "+str(WorkDir)+"/SNPs_out.fasta"
print "Complete PE-validated fasta file: "+str(command35)
os.system(command35)
# End.
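# Illustrative invocation (all paths, the database name and the species code
# below are hypothetical; -c for the species code is an assumption inferred
# from the helper scripts called above):
# python RAD4SNPs_Main.py -i /path/to/workdir -d my_database -c Spp \
#        -i1 lib1_R1.fq.gz -i2 lib2_R1.fq.gz -i3 lib1_R2.fq.gz -i4 lib2_R2.fq.gz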
|
glassalle/Rad4Snps
|
RAD4SNPs_Main.py
|
Python
|
gpl-3.0
| 12,997
|
# -*- coding: utf-8 -*-
# Copyright 2010-2012 Kolab Systems AG (http://www.kolabsys.com)
#
# Jeroen van Meeuwen (Kolab Systems) <vanmeeuwen a kolabsys.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 or, at your option, any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import commands
import pykolab
from pykolab import utils
from pykolab.translate import _
log = pykolab.getLogger('pykolab.cli')
conf = pykolab.getConf()
def __init__():
commands.register('user_info', execute, description="Display user information.")
def execute(*args, **kw):
from pykolab import wap_client
try:
user = conf.cli_args.pop(0)
except IndexError, errmsg:
user = utils.ask_question(_("User"))
# Create the authentication object.
# TODO: Binds with superuser credentials!
wap_client.authenticate(username=conf.get("ldap", "bind_dn"), password=conf.get("ldap", "bind_pw"))
user = wap_client.user_info(user)
print user
|
detrout/pykolab
|
pykolab/cli/cmd_user_info.py
|
Python
|
gpl-3.0
| 1,532
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')p9u&kcu@_(8u&-%4(m9!&4*82sx97zyl-!i#m9kic2lycj%0)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'demografia.apps.DemografiaConfig',
'dal',
'dal_select2',
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
#'input_mask',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'comunidad.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'comunidad.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '127.0.0.1',
'NAME': 'comunidad',
'PASSWORD': '123456',
'PORT': '5432',
'USER': 'postgres',
'SCHEMAS': 'public,demografia'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
SUIT_CONFIG = {
# header
'ADMIN_NAME': 'comunidad',
'HEADER_DATE_FORMAT': 'l, j. F Y',
'HEADER_TIME_FORMAT': 'H:i',
# forms
'SHOW_REQUIRED_ASTERISK': True, # Default True
'CONFIRM_UNSAVED_CHANGES': True, # Default True
# menu
'SEARCH_URL': '/admin/auth/user/',
'MENU_ICONS': {
'sites': 'icon-leaf',
'auth': 'icon-lock',
},
# 'MENU_OPEN_FIRST_CHILD': True, # Default True
'MENU_EXCLUDE': ('demografia.miembrohogar',),
# 'MENU': (
# 'sites',
# {'app': 'auth', 'icon':'icon-lock', 'models': ('user', 'group')},
# {'label': 'Settings', 'icon':'icon-cog', 'models': ('auth.user', 'auth.group')},
# {'label': 'Support', 'icon':'icon-question-sign', 'url': '/support/'},
# ),
# misc
'LIST_PER_PAGE': 20
}
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'index'
CACHE_BACKEND = 'simple:///'
AUTH_PROFILE_MODULE = "demografia.persona"
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
|
gvizquel/comunidad
|
comunidad/settings.py
|
Python
|
gpl-3.0
| 4,931
|
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import, print_function, division)
from copy import deepcopy
from itertools import combinations
class Cell:
def __init__(self):
self.value = 0
self.row = set()
self.col = set()
self.sq = set()
self.rm_values = set()
def isSet(self):
return self.value > 0
@property
def values(self):
if self.value:
return set()
else:
return set(range(1, 10)) - self.row - self.col - self.sq - self.rm_values
def set(self, val):
if val > 0:
if val not in self.row and val not in self.col and val not in self.sq:
self.value = val
self.row.add(val)
self.col.add(val)
self.sq.add(val)
else:
raise ValueError
def rm_value(self, val):
if isinstance(val, int):
self.rm_values.add(val)
elif isinstance(val, set):
self.rm_values |= val
def __repr__(self):
if self.value == 0:
return ' '
else:
return repr(self.value)
def carre(i,j):
return i//3+3*(j//3)
def are_neigh(i,j,k,l):
return (i==k) + (j==l) + (carre(i,j)==carre(k,l))
def coord(dim, i, k):
if dim==0:
return i, k
elif dim==1:
return k, i
elif dim==2:
return 3*(i%3)+k%3,3*(i//3)+k//3
class Sudoku:
def __init__(self, start=None): #(((0,)*9, )*9):
self.grid = { }
self.turns = 0
# Cells initialisation
for i in range(9):
# self.grid[i] = { }
for j in range(9):
self.grid[i,j] = Cell()
# Rows initialisation
for j in range(9):
row = set()
for i in range(9):
self.grid[i,j].row = row
# Columns initialisation
for i in range(9):
col = set()
for j in range(9):
self.grid[i,j].col = col
# Squares initialisation
for c in range(9):
sq = set()
for i in range(3):
for j in range(3):
self.grid[i+3*(c%3),j+3*(c//3)].sq = sq
if start:
for j, c in enumerate(start):
for i, v in enumerate(c):
try:
self.set(i, j, v)
except:
print('###', i, j, v)
raise
def __repr__(self):
result = '-'*25 + "\n"
for j in range(8, -1, -1):
line = ''
for i in range(0, 9, 3):
line += "| %r %r %r " % (tuple( self.grid[k,j] for k in range(i, i+3) ))
result += "%s|\n" % line
if not j%3:
result += '-'*25 + "\n"
return result.rstrip()
@property
def solved(self):
return all( [ self.grid[i,j].isSet() for i in range(9) for j in range(9) ] )
def set(self, i, j, val):
self.grid[i,j].set(val)
def rm_value(self, i, j, val):
self.grid[i,j].rm_value(val)
def neigh_values(self, x, y, coord=False):
row_result = set()
for i in range(9):
if i != x:
if coord:
row_result.add((i,y))
else:
row_result |= self.grid[i,y].values
col_result = set()
for j in range(9):
if j != y:
if coord:
col_result.add((x,j))
else:
col_result |= self.grid[x,j].values
sq_result = set()
for i in range(3):
for j in range(3):
if i != x%3 or j != y%3:
if coord:
sq_result.add((i+3*(x//3),j+3*(y//3)))
else:
sq_result |= self.grid[i+3*(x//3),j+3*(y//3)].values
if coord:
return row_result | col_result | sq_result
else:
return (row_result, col_result, sq_result)
def rech_solitaire_nu(self):
chgt = False
# Naked single ("solitaire nu")
for i in range(9):
for j in range(9):
l = self.grid[i,j].values
if len(l) == 1:
v = l.pop()
print("%d,%d -> %d |" % (i, j, v), end=' ')
self.set(i, j, v)
chgt = True
self.turns += 1
return chgt
def rech_solitaire_camoufle(self):
chgt = False
# Hidden single ("solitaire camouflé")
for i in range(9):
for j in range(9):
l = self.grid[i,j].values
for a in ( l - x for x in self.neigh_values(i, j) ):
if len(a) == 1:
v = a.pop()
print("%d,%d => %d |" % (i, j, v), end=' ')
self.set(i, j, v)
chgt = True
self.turns += 1
break
return chgt
def rech_gpes_dominants(self):
chgt = False
for v in range(1, 10):
candidates = [ (i,j) for i in range(9) for j in range(9) if v in self.grid[i,j].values ]
for candidat in candidates:
for dim in (0, 1): # column/row
copains = [ a for a in candidates if a[dim]==candidat[dim] and are_neigh(*candidat,*a) >= 2 ]
candid_mince = [ a for a in candidates if a[dim]==candidat[dim] and a not in copains ]
candid_sq = [ a for a in candidates if carre(*a)==carre(*candidat) and a not in copains ]
if not candid_mince:
for cell in candid_sq:
print("%d,%d -> -%d |" % (*cell, v), end=' ')
self.rm_value(*cell, v)
chgt = True
self.turns += 1
elif not candid_sq:
for cell in candid_mince:
print("%d,%d -> -%d |" % (*cell, v), end=' ')
self.rm_value(*cell, v)
chgt = True
self.turns += 1
return chgt
def rech_gpes_nus(self):
chgt = False
candidates = [ (i,j,self.grid[i,j].values) for i in range(9) for j in range(9) if self.grid[i,j].values ]
for (i,j,v) in candidates:
current_gpe = [(i,j)]
for (k,l,m) in candidates:
if all([ 1 <= are_neigh(*g,k,l) <= 2 for g in current_gpe ]) and m <= v:
current_gpe.append((k,l))
if len(current_gpe) == len(v):
for (k,l,m) in candidates:
intersect = m&v
if all([ 1 <= are_neigh(*g,k,l) <= 2 for g in current_gpe ]) and intersect:
print("%d,%d => -%s |" % (k,l,intersect), end=' ')
self.rm_value(k,l,intersect)
chgt = True
self.turns += 1
return chgt
def rech_gpes_camoufles(self):
chgt = False
candidates = [ (i,j,self.grid[i,j].values) for i in range(9) for j in range(9) ]
values_count = ( # col, lig, sq
{ i: {j: set() for j in range(1, 10)} for i in range(9)},
{ i: {j: set() for j in range(1, 10)} for i in range(9)},
{ i: {j: set() for j in range(1, 10)} for i in range(9)},
)
for (i, j, values) in candidates:
for v in values:
values_count[0][i][v].add((i,j))
values_count[1][j][v].add((i,j))
values_count[2][carre(i,j)][v].add((i,j))
for dim in (0, 1, 2): # column/row/square
for k in range(9):
count_values = [ {'vals': set((v, )), 'cells': c} for (v,c) in values_count[dim][k].items() if len(c) > 1 ]
# len(c) == 0 corresponds to fixed values, and 1 to a naked single...
all_combinations = []
for n in range(1,5): # On limite au quatuor (si un quintet existe, il y aura aussi un quatuor complémentaire (5+4=9 cases)
all_combinations += combinations(count_values, n)
all_count_values = []
for glop in all_combinations:
tmp = {'vals': set(), 'cells': set() }
for plop in glop:
tmp['vals'] |= plop['vals']
tmp['cells'] |= plop['cells']
all_count_values.append(tmp)
for result in all_count_values:
if result['vals'] and len(result['cells'])==len(result['vals']):
for cell in result['cells']:
diff = self.grid[cell].values - result['vals']
if diff:
print("%d,%d ~> -%s |" % (*cell, diff), end=' ')
self.rm_value(*cell, diff)
chgt = True
self.turns += 1
return chgt
def rech_reseaux(self):
chgt = False
for v in range(1, 10):
candidates = [ (i,j) for i in range(9) for j in range(9) if v in self.grid[i,j].values ]
for dim in (0, 1): # colonne/ligne
other_dim = int(not dim)
current_dims = { i: set() for i in range(9) }
for a in candidates:
current_dims[a[dim]].add(a[other_dim])
all_combinations = []
for n in range(1,5): # Limit to quadruples (if a quintuple exists, a complementary quadruple also exists: 5+4=9 cells)
all_combinations += combinations([ ({i}, current_dims[i]) for i in current_dims if current_dims[i] ], n)
for combin in all_combinations:
current_dim = set()
current_other_dim = set()
for c in combin:
current_dim |= c[0]
current_other_dim |= c[1]
if len(current_dim) == len(current_other_dim):
for a in [ a for a in candidates if a[dim] not in current_dim and a[other_dim] in current_other_dim ]:
print("%d,%d *> -%d |" % (*a, v), end=' ')
self.grid[a].rm_value(v)
chgt = True
self.turns += 1
return chgt
def solve(self):
# https://fr.wikibooks.org/wiki/Résolution_de_casse-têtes/Résolution_du_sudoku
chgt = (True, )
while not self.solved and any(chgt):
chgt = (
self.rech_solitaire_nu(),
self.rech_solitaire_camoufle(),
)
if not any(chgt):
chgt = (
self.rech_gpes_dominants(),
self.rech_gpes_nus(),
self.rech_gpes_camoufles(),
self.rech_reseaux(),
)
#print("\n%r" % self)
#raw_input("Press Enter to continue...")
print("\n%r\n###### Résolu: %s en %d coups #######" % (self, self.solved, self.turns))
# if not self.solved:
# print([ (i,j,self.grid[i,j].values) for i in range(9) for j in range(9) ])
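# Minimal usage sketch (the grid is hypothetical): build a 9x9 nested sequence
# of ints, 0 meaning an empty cell, in the layout expected by __init__.
# grid = (
#     (0, 0, 3, 0, 2, 0, 6, 0, 0),
#     ...  # eight more rows of nine ints
# )
# s = Sudoku(grid)
# s.solve()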
|
albatros69/Divers
|
sudoku.py
|
Python
|
gpl-3.0
| 11,551
|
#!/usr/bin/env python
import os
import sys
import glob
import argparse
from datetime import datetime
import platform
if platform.system().lower() == 'darwin':
os.environ['PYTHONPATH'] = '%s/osx_libs:$PYTHONPATH' % os.getcwd()
import wormtable as wt
################################################################################
# This script allows the user to filter variants in a vcf file based on one or
# more genes of interest. Genes can be provided as a comma-separated string or
# as a text file, with one gene per line. The query can be either positive (keep
# variants annotated to any of the input genes) or negative (keep variants not
# annotated to any of the input genes).
################################################################################
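# Illustrative command line (file names and the 'GENE' field name are
# hypothetical; the option letters match parse_args() below):
# python script07_use_gene_list.py -i wormtables_dir -o filtered.txt \
#        -g BRCA1,BRCA2 -f GENE -n False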
def parse_args():
"""
Parse the input arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', dest = 'inp_folder', required = True,
help = 'input folder containing the several wormtables')
parser.add_argument('-o', dest = 'out_file', required = True,
help = 'output file [.txt]')
parser.add_argument('-g', dest = 'genes_to_query', required = True,
help = 'genes of interest [comma-sep. string or file ' +
'path]')
parser.add_argument('-f', dest = 'field_name', required = True,
help = 'field where gene names have to be searched')
parser.add_argument('-n', dest = 'negative_query', required = True,
help = 'is this a negative query? [True or False]')
parser.add_argument('-p', dest = 'previous_results', required = False,
help = 'previously saved results from another query ' +
'[.txt]')
args = parser.parse_args()
return args
def check_input_file(folder_name):
"""
Make sure that the input folder exists.
"""
if not os.path.exists(folder_name):
sys.stderr.write("\nFolder named '" + folder_name + "' does not exist.\n")
sys.exit()
return folder_name
def check_output_file(file_name):
"""
Make sure that the output file's path does not already exist.
"""
if os.path.exists(file_name):
sys.stderr.write("\nFile named '" + file_name + "' already exists.\n")
sys.exit()
return file_name
def store_genes(genes_to_query):
"""
Store all input gene names in a set. If the path of genes_to_query does not
exist, it will treat genes_to_query as a string.
"""
genes = set()
# genes_to_query is a text file
if os.path.exists(genes_to_query):
f = open(genes_to_query)
for line in f:
genes.add(line.strip('\n'))
f.close()
# genes_to_query is a comma-separated string
else:
genes = set(genes_to_query.split(','))
return genes
def get_variants_assoc_to_gene_set_from_previous_results(inp_folder, genes,
field_name, negative_query, previous_results):
"""
Open the field_name wormtable (assumed to be named 'inp_folder/field_name.wt')
within inp_folder and return a set of all row IDs where at least one gene from
genes is found. Use ids from previous_results as starting point to further
filter the data and to make it faster.
If negative_query is True, only variants NOT containing any of the input genes
in field_name will be returned; if False, vice versa (a positive query is run).
"""
# extract row IDs to check from previous_results (which is a file path) and
# store them in a set; NOTE: it assumes previous_results has a 1-line header,
# is tab-separated and row_id is the left-most field!
ids_to_check = set()
f = open(previous_results)
header = True
for line in f:
if header:
header = False
else:
ids_to_check.add(int(line.split('\t')[0]))
f.close()
# open wormtable for the field of interest
table = wt.open_table(inp_folder + '/' + field_name + '.wt',
db_cache_size='4G')
index = table.open_index('row_id')
all_ids = set()
pos_ids = set()
# NOTE: it assumes the wormtable has only two columns: 'row_id' and field_name
row_id_idx = 0
field_name_idx = 1
for row in index.cursor(['row_id', field_name]):
if row[row_id_idx] in ids_to_check:
all_ids.add(row[row_id_idx])
for value in row[field_name_idx].split(','):
for gene in genes:
if value.find(gene) != -1:
pos_ids.add(row[row_id_idx])
break
# close table and index
table.close()
index.close()
# if "negative_query" is True, return all row IDs which are not in "pos_ids"
if negative_query == 'True':
neg_ids = all_ids - pos_ids
return neg_ids
elif negative_query == 'False':
return pos_ids
def get_variants_assoc_to_gene_set(inp_folder, genes, field_name,
negative_query):
"""
Open the field_name wormtable (assumed to be named 'inp_folder/field_name.wt')
within inp_folder and return a set of all row IDs where at least one gene from
genes is found.
If negative_query is True, only variants NOT containing any of the input genes
in field_name will be returned; if False, vice versa (a positive query is run).
"""
# open wormtable for the field of interest
table = wt.open_table(inp_folder + '/' + field_name + '.wt',
db_cache_size='4G')
all_ids = set()
pos_ids = set()
# NOTE: it assumes the wormtable has only two columns: 'row_id' and field_name
row_id_idx = 0
field_name_idx = 1
for row in table.cursor(['row_id', field_name]):
all_ids.add(row[row_id_idx])
for value in row[field_name_idx].split(','):
for gene in genes:
if value.find(gene) != -1:
pos_ids.add(row[row_id_idx])
break
# close table
table.close()
# if "negative_query" is True, return all row IDs which are not in "pos_ids"
if negative_query == 'True':
neg_ids = all_ids - pos_ids
return neg_ids
elif negative_query == 'False':
return pos_ids
def retrieve_variants_by_rowid(inp_folder, ids, out_file):
"""
Use the row IDs in ids to query the complete wormtable (containing all variant
fields) and return all the information about the filtered variants.
"""
# open table and load indices
table = wt.open_table(inp_folder + '/schema.wt', db_cache_size='4G')
index = table.open_index('row_id')
# retrieve the rows using the 'row_id' field and write the results in out_file
col_names = [col.get_name() for col in table.columns()]
row_id_idx = col_names.index('row_id')
out = open(out_file, 'w')
out.write('\t'.join(col_names) + '\n')
for row in index.cursor(col_names):
if row[row_id_idx] in ids:
to_write = list()
for value in row:
try: # value is a number (int or float)
to_write.append(int(value))
except TypeError, e: # value is a tuple
if value is not None:
to_write.append(','.join([str(x) for x in value]))
else:
to_write.append(None)
except ValueError, e: # value is a string
to_write.append(value)
except:
to_write.append(None)
out.write('\t'.join([str(x) for x in to_write]) + '\n')
out.close()
# close table and index
table.close()
index.close()
return
def script07_api_call(i_folder, o_file, genes_to_query, field_name,
negative_query, previous_results = None):
"""
API call for web-based and other front-end services, to avoid a system call
and a new Python process.
"""
t1 = datetime.now()
inp_folder = check_input_file(i_folder)
out_file = check_output_file(o_file)
negative_query = str(negative_query).lower()
if negative_query.startswith('t'):
negative_query = 'True'
else:
negative_query = 'False'
genes = store_genes(genes_to_query)
if previous_results is not None:
ids = get_variants_assoc_to_gene_set_from_previous_results(inp_folder,
genes, field_name, negative_query, previous_results)
else:
ids = get_variants_assoc_to_gene_set(inp_folder, genes, field_name,
negative_query)
retrieve_variants_by_rowid(inp_folder, ids, out_file)
t2 = datetime.now()
sys.stderr.write('%s\n' % str(t2 - t1))
return
def main():
"""
Main function.
"""
args = parse_args()
script07_api_call(args.inp_folder, args.out_file, args.genes_to_query,
args.field_name, args.negative_query, args.previous_results)
if __name__ == '__main__':
main()
|
BSGOxford/BrowseVCF
|
web/scripts/script07_use_gene_list.py
|
Python
|
gpl-3.0
| 8,403
|
"""All things that are specifically related to adinebook website"""
from collections import defaultdict
from logging import getLogger
from typing import Optional
from langid import classify
from regex import compile as regex_compile
from requests import RequestException
from mechanicalsoup import StatefulBrowser
from lib.commons import first_last, dict_to_sfn_cit_ref, request, USER_AGENT,\
LANG
ISBN_SEARCH = regex_compile(r'ISBN: </b> ([-\d]++)').search
DATE_SEARCH = regex_compile(
r'تاریخ نشر:</b>(?<year>\d\d)/(?<month>\d\d)/(?<day>\d\d)').search
PUBLISHER_SEARCH = regex_compile(
r'Publisher_ctl00_NameLabel" class="linkk">(.*?)</span>').search
VOLUME_SEARCH = regex_compile(r'\bجلد (\d+)').search
TITLE_SEARCH = regex_compile(r'BookTitle" class="h4">([^<]++)').search
AUTHORS_FINDALL = regex_compile(
r'rptAuthor_ctl\d\d_NameLabel" class="linkk">([^>:]++):([^<]++)<').findall
LOCATION_SEARCH = regex_compile(r'محل نشر:</b>([^<]++)<').search
def ketabir_scr(url: str, date_format='%Y-%m-%d') -> tuple:
"""Return the response namedtuple."""
dictionary = url2dictionary(url)
dictionary['date_format'] = date_format
if 'language' not in dictionary:
# Assume that language is either fa or en.
# Todo: give warning about this assumption?
dictionary['language'] = \
classify(dictionary['title'])[0]
return dict_to_sfn_cit_ref(dictionary)
def isbn2url(isbn: str) -> Optional[str]:
"""Return the ketab.ir book-url for the given isbn."""
browser = StatefulBrowser(user_agent=USER_AGENT)
browser.open('http://www.ketab.ir/Search.aspx')
browser.select_form()
browser['ctl00$ContentPlaceHolder1$TxtIsbn'] = isbn
browser.submit_selected()
first_link = browser.get_current_page().select_one('.HyperLink2')
if first_link is None:
return
return browser.absolute_url(first_link['href'])
def url2dictionary(ketabir_url: str) -> Optional[dict]:
try:
# Try to see if ketabir is available,
# ottobib should continue its work in isbn.py if it is not.
r = request(ketabir_url)
except RequestException:
logger.exception(ketabir_url)
return
html = r.content.decode('utf-8')
d = defaultdict(lambda: None, cite_type='book')
d['title'] = TITLE_SEARCH(html)[1]
# initiating name lists:
others = []
authors = []
editors = []
translators = []
# building lists:
for role, name in AUTHORS_FINDALL(html):
if role == 'نويسنده':
authors.append(first_last(name))
elif role == 'مترجم':
translators.append(first_last(name))
elif role == 'ويراستار':
editors.append(first_last(name))
else:
others.append(('', f'{name} ({role})'))
if authors:
d['authors'] = authors
if others:
d['others'] = others
if editors:
d['editors'] = editors
if translators:
d['translators'] = translators
m = PUBLISHER_SEARCH(html)
if m:
d['publisher'] = m[1]
m = DATE_SEARCH(html)
if m:
# Prepend the Solar Hijri century ('۱۳') to the two-digit year.
d['month'] = m['month']
d['year'] = '۱۳' + m['year']
m = ISBN_SEARCH(html)
if m:
d['isbn'] = m[1]
m = VOLUME_SEARCH(html)
if m:
d['volume'] = m[1]
m = LOCATION_SEARCH(html)
if m:
d['publisher-location'] = m[1]
return d
logger = getLogger(__name__)
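# Usage sketch (the ISBN below is a made-up placeholder; ketabir_scr returns
# the namedtuple described in its docstring):
# url = isbn2url('9789640000000')
# if url is not None:
#     response = ketabir_scr(url)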
|
5j9/yadkard
|
lib/ketabir.py
|
Python
|
gpl-3.0
| 3,580
|
import struct
import re
from .core import NamedItemList
from copy import deepcopy
_SINGLE_MEMBER_REGEX = re.compile(r"^[@=<>!]?([0-9]*)([xcbB\?hHiIlLqQnNefdspP])$")
def __isSingleMemberFormatString(format):
return bool(_SINGLE_MEMBER_REGEX.match(format))
def formatStringForMembers(members):
formatString = ""
for member in members:
if not isinstance(member, tuple):
raise TypeError("Member list items must be specified as tuples.")
if len(member) != 2:
raise ValueError("Member tuple must have two items.")
if not isinstance(member[0], str):
raise TypeError("Member name was not specified as a string.")
if len(member[0]) < 1:
raise ValueError("Member name must not be an empty string.")
if not isinstance(member[1], str):
raise TypeError("Member format was not specified as a string.")
if not __isSingleMemberFormatString(member[1]):
raise ValueError("Member '" + member[0] + "' format string '" + member[1] + "' is not valid for a single member.")
formatString += member[1]
return formatString
def dataSizeForMembers(members):
return struct.calcsize(formatStringForMembers(members))
def dataBlobFormatString(members):
formatString = formatStringForMembers(members)
length = struct.calcsize(formatString)
return str(length) + "B"
def flattenList(data):
flatData = []
for item in data:
if isinstance(item, list):
flatData += item
elif isinstance(item, tuple):
flatData += list(item)
else:
flatData.append(item)
return flatData
class DataStruct:
"""Holds the definition for a lump of binary data."""
def __init__(self, formatStr, startZeroed=False):
if not isinstance(formatStr, str):
raise TypeError("DataStruct defString must be a string.")
self.__format = formatStr
self.__data = []
self.__requiredSize = struct.calcsize(self.__format)
if startZeroed:
self.setToZero()
def setToZero(self):
self.parseBinaryData(b'\0' * self.binaryDataRequiredSize())
def parseBinaryData(self, binaryData, offset=0):
if not isinstance(binaryData, bytes):
raise TypeError("Binary data is not in byte format.")
data = list(struct.unpack_from(self.__format, binaryData, offset))
self.__data = self._processParsedData(data)
def exportBinaryData(self):
if len(self.__data) < 1:
raise ValueError("No data to export.")
return struct.pack(self.__format, *flattenList(self.__data))
def binaryDataRequiredSize(self):
return self.__requiredSize
def formatString(self):
return self.__format
def data(self):
return self.__data
def _processParsedData(self, data):
return data
def __repr__(self):
return repr(self.__data)
class DataStructMemberGroupInfo:
"""Analyses members of a data struct and computes which items should be
grouped together (e.g. vectors, strings, etc.)."""
# Assumes that the format strings have been validated as per data struct requirements.
def __init__(self, members):
self.__members = members
# Number of items in the group for this member.
self.__groupCount = {}
# Original index in the incoming data at which this member group resides.
self.__originalIndex = {}
# Lambda to use to combine the group items. Non-existent if not applicable.
self.__combineFunc = {}
# Type of the group. Non-existent if not applicable.
self.__expectedType = {}
self.__processMembers()
# The following accessor functions use member indices, as members represented by the same format
# string may be referred to by different names.
def originalIndex(self, memberIndex):
return self.__originalIndex[memberIndex]
def groupCount(self, memberIndex):
return self.__groupCount[memberIndex]
def isGrouped(self, memberIndex):
return self.__groupCount[memberIndex] > 1
def combineFunc(self, memberIndex):
return self.__combineFunc[memberIndex]
def expectedGroupType(self, memberIndex):
return self.__expectedType[memberIndex]
def __processMembers(self):
dataIndex = 0
for memberIndex in range(0, len(self.__members)):
member = self.__members[memberIndex]
memberFormat = member[1]
self.__originalIndex[memberIndex] = dataIndex
# Set up some defaults that will get overridden if required.
self.__combineFunc[memberIndex] = lambda origItems: list(origItems)
self.__expectedType[memberIndex] = list
formatMatch = _SINGLE_MEMBER_REGEX.match(memberFormat)
groupCount = formatMatch.group(1)
groupType = formatMatch.group(2)
try:
if groupCount is None:
raise ValueError()
# This should never raise an exception, but the outer try will catch if it does.
groupCount = int(groupCount)
if groupCount < 2:
raise ValueError()
self.__groupCount[memberIndex] = groupCount
# Special case for handling strings:
if groupType == "c":
self.__combineFunc[memberIndex] = lambda origItems: b"".join(origItems)
self.__expectedType[memberIndex] = bytes
except Exception:
# If something goes wrong, this implies that the member has no group.
self.__groupCount[memberIndex] = 1
del self.__combineFunc[memberIndex]
del self.__expectedType[memberIndex]
dataIndex += self.__groupCount[memberIndex]
class NamedDataStruct(DataStruct):
"""Allows 'member names' for struct items."""
# The time taken to generate these for each instance every time one is created
# adds up. These are indexed by format string, so that we can check whether a
# group list for a set of members has already been created.
__cachedGroupInfoByFormatString = {}
# As an optimisation, the format string for the members can be passed in.
# This is not checked - it is assumed to accurately represent the list of members.
def __init__(self, members, startZeroed=False, formatString=None):
if not isinstance(members, list):
raise TypeError("Members must be specified as a list of 2-tuples.")
if formatString is None:
formatString = formatStringForMembers(members)
# This will do most of the validation, so we don't have to below.
# If we start zeroed, this must be done later after the members have been initialised.
super().__init__(formatString, False)
self.__rawMembers = members
self.__memberList = NamedItemList()
self.__memberGroupInfo = None
for member in members:
self.__memberList.append(member[0], member)
self.__generateMemberGroupInfo()
if startZeroed:
self.setToZero()
def valueByName(self, memberName):
if not isinstance(memberName, str):
raise TypeError("Member must be specified as a string.")
if not self.__memberList.hasItemWithName(memberName):
raise ValueError("Member '" + memberName + "' was not recognised.")
return self.valueByIndex(self.__memberList.nameToIndex(memberName))
def valueByIndex(self, index):
if not isinstance(index, int):
raise TypeError("Member index must be an integer.")
if index < 0 or index >= len(self):
raise ValueError("Member index " + str(index) + " is out of range.")
data = self.data()
if data is None:
raise ValueError("No member data has been set.")
return deepcopy(data[index])
def hasMemberName(self, name):
return self.__memberList.hasItemWithName(name)
def setValueByName(self, memberName, value):
if not self.hasMemberName(memberName):
raise ValueError(f"Member with name '{memberName}' does not exist.")
self.setValueByIndex(self.__memberList.nameToIndex(memberName), value)
def setValueByIndex(self, index, value):
data = self.data()
if len(data) < 1:
raise ValueError("Item is not yet initialised.")
if index < 0 or index >= len(data):
raise ValueError(f"Index {index} was out of range (expected 0-{len(data) - 1}).")
coercedValue = value
if isinstance(coercedValue, bytes):
coercedValue = [bytes([character]) for character in coercedValue]
else:
try:
# Attempt to intelligently convert to a list.
coercedValue = list(coercedValue)
except:
# Assume the value is singular and create a list instead.
coercedValue = [coercedValue]
member = self.__memberList.getItemByIndex(index)
memberFormat = member[1]
isGrouped = self.__memberGroupInfo.isGrouped(index)
if isGrouped:
groupCount = self.__memberGroupInfo.groupCount(index)
# If the type is bytes, pad the incoming data with zeroes.
if self.__memberGroupInfo.expectedGroupType(index) == bytes and \
isinstance(value, bytes) and \
len(coercedValue) < groupCount:
paddingLength = groupCount - len(coercedValue)
coercedValue += [bytes([character]) for character in bytes(paddingLength)] # pad with single zero bytes
if len(coercedValue) != groupCount:
raise ValueError(f"Expected {groupCount} items for member format '{memberFormat}', but got {len(coercedValue)} items.")
# Try packing the data in order to validate it.
try:
struct.pack(memberFormat, *coercedValue)
except:
raise TypeError(f"Value type '{type(value)}' was incorrect. Expected member format: '{memberFormat}'.")
# If this member requires a group, use the coerced list.
# Otherwise, use the raw value (which should be singular).
data[index] = coercedValue if isGrouped else value
def __generateMemberGroupInfo(self):
# Member format strings have been validated so that they only reference one data type.
# Therefore, if the string contains a number > 1, this means it references an aggregate
# type (e.g. a vector, string, etc.). We need to convert these into appropriate data types.
formatString = self.formatString()
# This function gets called every time an instance of this class is created, which is a lot.
# The time it takes to generate all of these can add up. Therefore, cache them once we have
# generated the data once.
if formatString in NamedDataStruct.__cachedGroupInfoByFormatString:
# Use cached version.
self.__memberGroupInfo = NamedDataStruct.__cachedGroupInfoByFormatString[formatString]
else:
# Create and cache.
self.__memberGroupInfo = DataStructMemberGroupInfo(self.__rawMembers)
NamedDataStruct.__cachedGroupInfoByFormatString[formatString] = self.__memberGroupInfo
def _processParsedData(self, data):
origDataList = data
newDataList = []
origIndex = 0
for memberIndex in range(0, len(self.__memberList)):
if self.__memberGroupInfo.isGrouped(memberIndex):
origIndex = self.__memberGroupInfo.originalIndex(memberIndex)
count = self.__memberGroupInfo.groupCount(memberIndex)
combineFunc = self.__memberGroupInfo.combineFunc(memberIndex)
# Generate a tuple as a group.
tupleToCopy = combineFunc(origDataList[origIndex : origIndex + count])
# Add this to the output list.
newDataList.append(tupleToCopy)
origIndex += len(tupleToCopy)
else:
# Just copy the data over verbatim.
newDataList.append(origDataList[origIndex])
origIndex += 1
return newDataList
def __getitem__(self, key):
if isinstance(key, str):
return self.valueByName(key)
elif isinstance(key, int):
return self.valueByIndex(key)
raise TypeError("Key is of an invalid type.")
def __setitem__(self, key, value):
if isinstance(key, str):
return self.setValueByName(key, value)
elif isinstance(key, int):
return self.setValueByIndex(key, value)
raise TypeError("Key is of an invalid type.")
def __len__(self):
return len(self.__memberList)
def __repr__(self):
return repr({self.__memberList.indexToName(index): self.valueByIndex(index) for index in range(0, len(self.__memberList))})
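# Usage sketch (the member layout below is illustrative, not part of the
# module's API):
# members = [('origin', '3f'), ('tag', '4c')]
# s = NamedDataStruct(members, startZeroed=True)
# s['origin'] = (1.0, 2.0, 3.0)  # grouped member: expects exactly 3 floats
# s['tag'] = b'ab'               # 'c' groups accept bytes, zero-padded to length 4
# blob = s.exportBinaryData()    # packs to struct format '3f4c'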
|
x6herbius/afterburner
|
tools/bsp/libbsp/structutils.py
|
Python
|
gpl-3.0
| 11,421
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
import rdflib
g=rdflib.Graph()
g.load('http://dbpedia.org/resource/Semantic_Web')
for s,p,o in g:
print(s,p,o)
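# A follow-up SPARQL query over the same graph (illustrative):
# qres = g.query("SELECT ?p ?o WHERE { ?s ?p ?o } LIMIT 5")
# for row in qres:
#     print(row)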
|
davidam/python-examples
|
rdflib/rdflib-example.py
|
Python
|
gpl-3.0
| 1,020
|
#!/usr/bin/env python
"""compares BSR values between two groups in a BSR matrix
Numpy and BioPython need to be installed. Python version must be at
least 2.7 to use collections"""
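# Illustrative invocation (file names are hypothetical; option letters match
# the parser defined below):
# python compare_BSR.py -b bsr_matrix.txt -f ORFs.fasta \
#        -1 group1_ids.txt -2 group2_ids.txt -u 0.8 -l 0.4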
from optparse import OptionParser
import subprocess
from ls_bsr.util import prune_matrix
from ls_bsr.util import compare_values
from ls_bsr.util import find_uniques
import sys
import os
def test_file(option, opt_str, value, parser):
try:
with open(value): setattr(parser.values, option.dest, value)
except IOError:
print('%s file cannot be opened' % option)
sys.exit()
def add_headers(infile, outfile, lower, upper):
file_out = open(outfile, "w")
file_out.write("marker"+"\t"+"group1_mean"+"\t"+">="+str(upper)+"\t"+"total_in_group_1"+"\t"+">="+str(lower)+"\t"+"group2_mean"+"\t"+">="+str(upper)+"\t"+"total_in_group2"+"\t"+">="+str(lower)+"\n")
with open(infile) as my_file:
for line in my_file:
file_out.write(line)
file_out.close()
def main(matrix,group1,group2,fasta,upper,lower):
prune_matrix(matrix,group1,group2)
compare_values("group1_pruned.txt","group2_pruned.txt",upper,lower)
subprocess.check_call("paste group1_out.txt group2_out.txt > groups_combined.txt", shell=True)
find_uniques("groups_combined.txt",fasta)
add_headers("groups_combined.txt","groups_combined_header.txt",lower,upper)
os.system("rm group1_out.txt group2_out.txt")
if __name__ == "__main__":
usage="usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-b", "--bsr_matrix", dest="matrix",
help="/path/to/bsr_matrix [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-f", "--fasta", dest="fasta",
help="/path/to/ORF_fasta_file [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-1", "--group_1_ids", dest="group1",
help="new line separated file with group1 ids [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-2", "--group_2_ids", dest="group2",
help="new line separated file with group2 ids [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-u", "--upper_bound", dest="upper",
help="upper bound for BSR comparisons, defaults to 0.8",
default="0.8", type="float")
parser.add_option("-l", "--lower_bound", dest="lower",
help="lower bound for BSR comparisons, defaults to 0.4",
default="0.4", type="float")
options, args = parser.parse_args()
mandatories = ["matrix", "group1", "group2", "fasta"]
for m in mandatories:
if not options.__dict__[m]:
print("\nMust provide %s.\n" %m)
parser.print_help()
exit(-1)
main(options.matrix,options.group1,options.group2,options.fasta,options.upper,options.lower)
|
jasonsahl/LS-BSR
|
tools/compare_BSR.py
|
Python
|
gpl-3.0
| 3,059
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Santiago Dueñas <sduenas@bitergia.com>
#
from __future__ import absolute_import
from __future__ import unicode_literals
from .email import EmailMatcher
from .email_name import EmailNameMatcher
SORTINGHAT_IDENTITIES_MATCHERS = {
'default' : EmailMatcher,
'email' : EmailMatcher,
'email-name' : EmailNameMatcher,
}
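# Usage sketch (illustrative; 'name' is a placeholder variable): resolve a
# matcher class by name, falling back to the default entry when unknown.
# matcher_cls = SORTINGHAT_IDENTITIES_MATCHERS.get(
#     name, SORTINGHAT_IDENTITIES_MATCHERS['default'])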
|
pombredanne/sortinghat
|
sortinghat/matching/__init__.py
|
Python
|
gpl-3.0
| 1,244
|
"""
dwm package setup
"""
from __future__ import print_function
from setuptools import setup, find_packages
__version__ = '1.1.0'
def readme():
""" open readme for long_description """
try:
with open('README.md') as fle:
return fle.read()
except IOError:
return ''
setup(
name='dwm',
version=__version__,
url='https://github.com/rh-marketingops/dwm',
license='GNU General Public License',
author='Jeremiah Coleman',
tests_require=['nose', 'mongomock>=3.5.0'],
install_requires=['pymongo>=3.2.2', 'tqdm>=4.8.4'],
author_email='colemanja91@gmail.com',
description='Best practices for marketing data quality management',
long_description=readme(),
packages=find_packages(),
include_package_data=True,
platforms='any',
test_suite='nose.collector',
classifiers=[
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Natural Language :: English',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks'
],
keywords='marketing automation data quality cleanse washing cleaning'
)
|
rh-marketingops/dwm
|
setup.py
|
Python
|
gpl-3.0
| 1,421
|
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, blocks
import math
def sig_source_f(samp_rate, freq, amp, N):
t = [float(x) / samp_rate for x in range(N)]
y = [amp*math.cos(2.*math.pi*freq*x) for x in t]
return y
def sig_source_c(samp_rate, freq, amp, N):
t = [float(x) / samp_rate for x in range(N)]
y = [math.cos(2.*math.pi*freq*x) + \
1j*math.sin(2.*math.pi*freq*x) for x in t]
return y
class test_vco(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001(self):
src_data = 200*[0,] + 200*[0.5,] + 200*[1,]
expected_result = 200*[1,] + \
sig_source_f(1, 0.125, 1, 200) + \
sig_source_f(1, 0.25, 1, 200)
src = blocks.vector_source_f(src_data)
op = blocks.vco_f(1, math.pi / 2.0, 1)
dst = blocks.vector_sink_f()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data, 5)
def test_002(self):
src_data = 200*[0,] + 200*[0.5,] + 200*[1,]
expected_result = 200*[1,] + \
sig_source_c(1, 0.125, 1, 200) + \
sig_source_c(1, 0.25, 1, 200)
src = blocks.vector_source_f(src_data)
op = blocks.vco_c(1, math.pi / 2.0, 1)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5)
if __name__ == '__main__':
gr_unittest.run(test_vco, "test_vco.xml")
|
trabucayre/gnuradio
|
gr-blocks/python/blocks/qa_vco.py
|
Python
|
gpl-3.0
| 1,819
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-13 06:00
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('MergeServer', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='results',
name='start_time',
field=models.TimeField(default=datetime.datetime.now),
),
]
|
zeqing-guo/SPAKeyManager
|
MergeServer/migrations/0002_auto_20160113_0600.py
|
Python
|
gpl-3.0
| 480
|
from django.core.urlresolvers import reverse
import django.http
import django.utils.simplejson as json
import functools
def make_url(request, reversible):
return request.build_absolute_uri(reverse(reversible))
def json_output(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
output = func(*args, **kwargs)
return django.http.HttpResponse(json.dumps(output),
content_type="application/json")
return wrapper
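# Usage sketch (the view function and the 'index' URL name are hypothetical):
# @json_output
# def status(request):
#     return {"ok": True, "next": make_url(request, "index")}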
|
ukch/online_sabacc
|
src/sabacc/api/viewhelpers.py
|
Python
|
gpl-3.0
| 494
|
# coding: utf-8
import re
import os
import ast
import luigi
import psycopg2
import boto3
import random
import sqlalchemy
import tempfile
import glob
import datetime
import subprocess
import pandas as pn
from luigi import six
from os.path import join, dirname
from luigi import configuration
from luigi.s3 import S3Target, S3Client
from dotenv import load_dotenv,find_dotenv
from luigi.contrib import postgres
from compranet.pipelines.pipelines.utils.pg_compranet import parse_cfg_string, download_dir
from compranet.pipelines.pipelines.etl.elt_orchestra import CreateSemanticDB
# Environment variables
load_dotenv(find_dotenv())
# Load Postgres Schemas
#temp = open('./common/pg_clean_schemas.txt').read()
#schemas = ast.literal_eval(temp)
# AWS
aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
class Model(luigi.Task):
"""
Intermediate class that triggers the modeling scripts
"""
year_month = luigi.Parameter()
def requires(self):
return CreateSemanticDB(self.year_month)
def run(self):
yield MissingClassifier(self.year_month)
yield CentralityClassifier(self.year_month)
class CentralityClassifier(luigi.Task):
"""
Class that runs the centrality measures implemented by neo4j
"""
year_month = luigi.Parameter()
script = luigi.Parameter('DEFAULT')
type_script = luigi.Parameter()
def run(self):
# First upload data into neo4j
cmd = '''
cycli ./models/neo4j_scripts/upload.neo4j
'''
subprocess.call(cmd, shell=True)
# Run centrality measures
cmd = '''
cycli ./models/neo4j_scripts/centrality.neo4j
'''
return subprocess.call(cmd, shell=True)
class MissingClassifier(luigi.Task):
"""
Class that runs the missing-values classification index
"""
year_month = luigi.Parameter()
script = luigi.Parameter('DEFAULT')
def run(self):
cmd = '''
python {}/missing-classifier.py
'''.format(self.script)
return subprocess.call(cmd, shell=True)
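# Illustrative command-line run (module path assumed from the repository
# layout; the parameter value is a placeholder):
# luigi --module compranet.pipelines.models.model_orchestra Model \
#       --year-month 201701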
|
rsanchezavalos/compranet
|
compranet/pipelines/models/model_orchestra.py
|
Python
|
gpl-3.0
| 2,147
|
import numpy as np
from definition import states_by_id
import pyproj as prj
class Route:
"""
A class of Breeding Bird Survey (BBS) route
Each Route includes the following members:
id - id number of the route
name - name of the route
stateID - to which state the route belongs
routeID - route ID inside each state
length - length published by USGS
path - 2D numpy array of point coordinates along the route
in default projection: EPSG:5070 (USGS standard).
!!! Note the points are not the stops of Bird Survey
!!! They are simply points to define a polyline of the route
path2 - path array converted to longitude/latitude
stops - 2D array of stop coordinates along the route
!!! Note the location of stops are calculated
!!! by distance from starting point along the route,
!!! currently hard-coded as every 800 meter (0.5 mile)
stops2 - stops array converted to longitude/latitude
stop_d - distance between stops, by standard should be around 800m,
but may vary a lot; currently the 50 stops are assumed to be
distributed evenly along the route, i.e. stop_d = length / 49
rating - type of route (single or multiple)
TODO: specify details on this feature
"""
# header - for export and print use
header = '%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
'ID', 'State ID', 'State',
'Route ID', 'Route Name', 'Route Length', 'Route Rating'
)
def __init__(self, fields, records):
"""
Initialize a Route using information in the USGS shapefile.
fields: field label used in shapefile
records: a list of route part records in the shapefile
"single" route contains only one part
"multiple" route contains multiple parts, but can be
stitched together
"""
# load information from the fields in the shapefile
i = 1
while i < len(fields):
if fields[i][0] == 'rteno':
self.id = records[0].record[i - 1]
self.stateID = self.id // 1000
self.routeID = self.id % 1000
if fields[i][0] == 'RTENAME':
self.name = records[0].record[i - 1]
if fields[i][0] == 'rte_length':
self.length = float(records[0].record[i - 1])
i = i + 1
# generate a complete route path
if len(records) == 1:
self.rating = 'single'
self.path = np.array(records[0].shape.points)
self.path2 = self.to_lonlat(self.path)
else:
self.rating = 'multiple'
# generate a list of points in each segments
self.path = np.array(records[0].shape.points)
thresh = 10.0
i = 1
while i < len(records):
r = np.array(records[i].shape.points)
p1 = self.path[0]
p2 = self.path[-1]
s1 = r[0]
s2 = r[-1]
if np.linalg.norm(p2 - s1) < thresh:
self.path = np.vstack((self.path, r))
elif np.linalg.norm(p2 - s2) < thresh:
self.path = np.vstack((self.path, r[-1::-1]))
elif np.linalg.norm(s2 - p1) < thresh:
self.path = np.vstack((r, self.path))
elif np.linalg.norm(s1 - p1) < thresh:
self.path = np.vstack((r[-1::-1], self.path))
else:
self.rating = 'broken'
break
i = i + 1
self.path2 = self.to_lonlat(self.path)
# calculate 50 stops along the path
if self.rating != 'broken':
self.stops, self.stop_d = self.calc_stop()
self.stops2 = self.to_lonlat(self.stops)
else:
self.stops = np.array(())
self.stops2 = np.array(())
self.stop_d = 0.0
# output Route summary
print(self.summary())
def to_lonlat(self, pts):
"""
Convert coordinate from EPSG:5070 to EPSG:4326 (Longitude/Latitide)
"""
new_pts = np.zeros_like(pts)
# define input/output projections of lon/lat transformation
inProj = prj.Proj(init='epsg:5070') # Albers Equal Area
outProj = prj.Proj(init='epsg:4326') # Lat/Long Geodetic System
for i in range(len(pts)):
x, y = pts[i]
lon, lat = prj.transform(inProj, outProj, x, y)
new_pts[i] = (lon, lat)
return new_pts
def calc_len(self):
"""
Calculate total length, segment length, x/y displacement of
each segment along the route.
"""
if self.rating == 'broken':
print('ERROR: Cannot calculate length for broken route')
exit(1)
# python list of lengths of segments (between stops)
segments = np.zeros((len(self.path) - 1))
dxy = np.zeros((len(self.path) - 1, 2))
total_len = 0.0
for i in range(1, len(self.path)):
# !!! Only apply to EPSG:5070
# !!! Poor man's method to calc distance between two points
# !!! TODO: change to advanced method to handle different
# !!! projections
p0 = self.path[i - 1]
p1 = self.path[i]
d = np.linalg.norm(p1 - p0)
dxy[i - 1] = p1 - p0
segments[i - 1] = d
total_len += d
return total_len, segments, dxy
def calc_stop(self):
"""
Calculate 50 stops along a BBS route.
"""
if self.rating == 'broken':
print('ERROR: Cannot calculate stops for broken route')
exit(1)
# calculate total path length and generate a list of segment lengths
length, segments, dxy = self.calc_len()
#
# TODO: check if the calculated length matches the published data
#
# hard-coded number of stops
nstop = 50
# use the starting point as first stop
stops = np.zeros((50, 2))
stops[0] = self.path[0]
k = 1
# distance between each stop, more or less 800 meters
# TODO: warning if the value not close to 800 meters
# TODO: handle other projections (?)
dstop = length / (nstop - 1)
# "trip" counts how many meters traveled since the last stop
trip = 0.0
for i in range(len(segments)):
seg = trip + segments[i]
# the fraction of two parts that are split by the stop in the
# current segments, used to linearly interpolate stop coordinates
frac = 0.0
while seg >= dstop:
frac += (dstop - trip)
stops[k] = self.path[i] + frac / segments[i] * dxy[i]
k = k + 1
seg -= dstop
trip = 0.0
trip = seg
# handle the last stop
if k == nstop - 1:
stops[-1] = self.path[-1]
elif k < nstop - 1:
# TODO: is this necessary?
print('!!! %d - %s: Not enough stops found, k = %d'
% (self.id, self.name, k))
elif k > nstop:
# TODO: is this necessary?
print('!!! %d - %s: More stops found, k = %d'
% (self.id, self.name, k))
return stops, dstop
def summary(self):
"""
Summary string for print and export
"""
return '%d\t%d\t%s\t%d\t%s\t%f\t%s\n' % (
self.id, self.stateID, states_by_id[self.stateID].name,
self.routeID, self.name, self.length, self.rating
)
def export(self):
"""
Export route information to a CSV file.
"""
if self.rating == 'broken':
print('ERROR: exporting broken route')
exit(1)
with open('rte_' + str(self.id) + '.csv', 'w') as f:
f.write('sep=\t\n')
f.write(self.header)
f.write(self.summary())
f.write('====\t====\t====\tStops Info\t====\t====\t====\n')
f.write('----\tStop\tX\tY\tLongitude\tLatitude\t----\n')
for i in range(50):
x, y = self.stops[i]
lon, lat = self.stops2[i]
f.write('----\t%d\t%f\t%f\t%f\t%f\t----\n' % (
i + 1, x, y, lon, lat)
)
f.write('====\t====\t====\tPath Info\t====\t====\t====\n')
f.write('----\tPoint\tX\tY\tLongitude\tLatitude\t----\n')
for i in range(len(self.path)):
x, y = self.path[i]
lon, lat = self.path2[i]
f.write('----\t%d\t%f\t%f\t%f\t%f\t----\n' % (
i + 1, x, y, lon, lat)
)
|
piw/pyCDL
|
pyCDL/route.py
|
Python
|
gpl-3.0
| 9,010
|
from . import sqldb
class aminoAcid():
def __init__(self,abbr1,abbr3,name):
self.abbr3 = abbr3
self.abbr1 = abbr1
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return self.name
def getOne(self):
return self.abbr1
def getThree(self):
return self.abbr3
class aminoAcidDB():
def __init__(self):
self.db = sqldb.sqliteDB('bioChemData/data.sql','protein')
def getAA3(self,abbr3):
abbr1 = self.db.getItem(abbr3,'one')
name = self.db.getItem(abbr3,'name')
return aminoAcid(abbr1,abbr3,name)
class translateDB():
def __init__(self):
self.db = sqldb.sqliteDB('bioChemData/data.sql','translate')
def getAA3(self,codon):
return self.db.getItem(codon,'protein')
def codonTranslate(codon,codonDB,aaDB):
return aaDB.getAA3(codonDB.getAA3(codon))
def nucleotideTranslation(posStrand):
pointer = 0
result = ''
lastAA = 'M'
adb = aminoAcidDB()
cdb = translateDB()
while posStrand[pointer:pointer+3] != 'ATG' and pointer <= len(posStrand)-3:
pointer += 1
while pointer <= len(posStrand)-3 and lastAA != 'X':
lastAA = adb.getAA3(cdb.getAA3(posStrand[pointer:pointer+3])).getOne()
result += lastAA
pointer += 3
return result
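# Usage sketch (hedged): assuming the bundled data.sql maps codons to
# three-letter amino acid codes and uses 'X' as the stop marker (as the
# loop above expects), a call like
#   nucleotideTranslation('CCATGGCTTAA')
# scans to the first 'ATG' and then translates codon by codon until the
# stop marker is appended, e.g. yielding 'MAX' under those assumptions.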
|
StephDC/MiniBioKit
|
bioChemData/protein.py
|
Python
|
gpl-3.0
| 1,344
|
# Copyright © 2020, Joseph Berry, Rico Tabor (opendrop.dev@gmail.com)
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant drop tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gnu.org/licenses/>.
from typing import Any, Tuple
from gi.repository import Gtk, Gdk
from opendrop.app import keyboard
from opendrop.app.common.image_processing.image_processor import ImageProcessorPluginViewContext
from opendrop.mvp import ComponentSymbol, View, Presenter
from opendrop.utility.bindable.gextension import GObjectPropertyBindable
from opendrop.geometry import Vector2, Line2
from opendrop.widgets.canvas import LineArtist, CircleArtist
from .model import DefineLinePluginModel
define_line_plugin_cs = ComponentSymbol() # type: ComponentSymbol[None]
@define_line_plugin_cs.view(options=['view_context', 'tool_id', 'color', 'z_index'])
class DefineLinePluginView(View['DefineLinePluginPresenter', None]):
def _do_init(
self,
view_context: ImageProcessorPluginViewContext,
tool_id: Any,
color: Tuple[float, float, float],
z_index: int,
) -> None:
self._view_context = view_context
self._tool_ref = view_context.get_tool_item(tool_id)
view_context.canvas.connect(
'cursor-up',
lambda canvas, pos: self.presenter.cursor_up(pos),
)
view_context.canvas.connect(
'cursor-down',
lambda canvas, pos: self.presenter.cursor_down(pos),
)
view_context.canvas.connect(
'cursor-motion',
lambda canvas, pos: self.presenter.cursor_move(pos),
)
view_context.canvas.connect(
'key-press-event',
self._hdl_canvas_key_press_event,
)
self.bn_tool_button_is_active = self._tool_ref.bn_is_active
self._canvas = view_context.canvas
self._defined_artist = LineArtist(
stroke_color=color,
stroke_width=1,
scale_strokes=True,
)
self._canvas.add_artist(self._defined_artist, z_index=z_index)
self._dragging_artist = LineArtist(
stroke_color=color,
stroke_width=1,
scale_strokes=True,
)
self._canvas.add_artist(self._dragging_artist, z_index=z_index)
self._control_point_artist = CircleArtist(
fill_color=color,
scale_radius=True,
)
self._canvas.add_artist(self._control_point_artist, z_index=z_index)
self.bn_defined = GObjectPropertyBindable(
g_obj=self._defined_artist,
prop_name='line',
)
self.bn_dragging = GObjectPropertyBindable(
g_obj=self._dragging_artist,
prop_name='line',
)
self.presenter.view_ready()
def show_control_point(self, xc: float, yc: float) -> None:
self._control_point_artist.props.xc = xc
self._control_point_artist.props.yc = yc
self._control_point_artist.props.radius = 2.0
def hide_control_point(self) -> None:
self._control_point_artist.props.radius = 0.0
def _hdl_canvas_key_press_event(self, widget: Gtk.Widget, event: Gdk.EventKey) -> bool:
self.presenter.key_press(
keyboard.KeyEvent(
key=keyboard.Key.from_value(event.keyval),
modifier=int(event.state)
)
)
# Stop event propagation.
return True
def _do_destroy(self) -> None:
self._canvas.remove_artist(self._defined_artist)
self._canvas.remove_artist(self._dragging_artist)
@define_line_plugin_cs.presenter(options=['model'])
class DefineLinePluginPresenter(Presenter['DefineLinePluginView']):
def _do_init(self, model: DefineLinePluginModel) -> None:
self._model = model
self.__data_bindings = []
self.__event_connections = []
def view_ready(self) -> None:
self.__data_bindings.extend([
self._model.bn_line.bind(
self.view.bn_defined
),
])
self.__event_connections.extend([
self.view.bn_tool_button_is_active.on_changed.connect(
self._hdl_tool_button_is_active_changed
),
])
self._hdl_tool_button_is_active_changed()
def _hdl_tool_button_is_active_changed(self) -> None:
if self._model.is_defining and not self.view.bn_tool_button_is_active.get():
self._model.discard_define()
def cursor_down(self, pos: Vector2[float]) -> None:
if not self.view.bn_tool_button_is_active.get():
return
if self._model.is_defining:
self._model.discard_define()
self._model.begin_define(pos)
self._update_dragging_indicator(pos)
def cursor_up(self, pos: Vector2[float]) -> None:
if not self.view.bn_tool_button_is_active.get():
return
if not self._model.is_defining:
return
self._model.commit_define(pos)
self._update_dragging_indicator(pos)
def cursor_move(self, pos: Vector2[float]) -> None:
self._update_dragging_indicator(pos)
def key_press(self, event: keyboard.KeyEvent) -> None:
if not self.view.bn_tool_button_is_active.get():
return
if self._model.is_defining:
# User is currently using mouse to define
return
if event.key is keyboard.Key.Up:
self._model.nudge_up()
elif event.key is keyboard.Key.Down:
self._model.nudge_down()
elif event.key is keyboard.Key.Left:
self._model.nudgerot_anticlockwise()
elif event.key is keyboard.Key.Right:
self._model.nudgerot_clockwise()
def _update_dragging_indicator(self, current_cursor_pos: Vector2[float]) -> None:
if not self._model.is_defining:
self.view.bn_dragging.set(None)
self.view.hide_control_point()
return
pt0 = self._model.begin_define_pos
pt1 = current_cursor_pos
if pt0 == pt1:
self.view.bn_dragging.set(None)
self.view.hide_control_point()
return
self.view.bn_dragging.set(Line2(
pt0=pt0,
pt1=pt1,
))
self.view.show_control_point(*self._model.begin_define_pos)
def _do_destroy(self) -> None:
for db in self.__data_bindings:
db.unbind()
for ec in self.__event_connections:
ec.disconnect()
|
jdber1/opendrop
|
opendrop/app/common/image_processing/plugins/define_line/component.py
|
Python
|
gpl-3.0
| 7,694
|
# -*- coding: utf-8 -*-
# Copyright (C) 2007 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""User-activatable actions for :class:`gaupol.Application`."""
from gaupol.actions.audio import * # noqa
from gaupol.actions.edit import * # noqa
from gaupol.actions.file import * # noqa
from gaupol.actions.help import * # noqa
from gaupol.actions.projects import * # noqa
from gaupol.actions.text import * # noqa
from gaupol.actions.tools import * # noqa
from gaupol.actions.video import * # noqa
from gaupol.actions.view import * # noqa
__all__ = tuple(x for x in dir() if x.endswith("Action"))
|
otsaloma/gaupol
|
gaupol/actions/__init__.py
|
Python
|
gpl-3.0
| 1,223
|
from . import _plotting_mess
data = _plotting_mess.complex_data
databoxes = _plotting_mess.complex_databoxes
files = _plotting_mess.complex_files
function = _plotting_mess.complex_function
|
Spinmob/spinmob
|
_plot_complex.py
|
Python
|
gpl-3.0
| 207
|
# Copyright 2009-2011 Klas Lindberg <klas.lindberg@gmail.com>
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
import sys
import struct
import socket
import json
import math
from tactile import IR
class ID:
SQUEEZEBOX = 2
SOFTSQUEEZE = 3
SQUEEZEBOX2 = 4
TRANSPORTER = 5
SOFTSQUEEZE3 = 6
RECEIVER = 7
SQUEEZESLAVE = 8
CONTROLLER = 9
SQUEEZEBOX3 = 104 # not reported by firmware, but inferred from HELO msg.
debug = {
SQUEEZEBOX : 'SqueezeBox',
SOFTSQUEEZE : 'SoftSqueeze',
SQUEEZEBOX2 : 'SqueezeBox_2',
TRANSPORTER : 'Transporter',
SOFTSQUEEZE3 : 'SoftSqueeze_3',
RECEIVER : 'Receiver',
SQUEEZESLAVE : 'SqueezeSlave',
CONTROLLER : 'Controller',
SQUEEZEBOX3 : 'SqueezeBox_3'
}
# there are Messages and Commands. Messages are inbound from the device and
# are used in communication between dwite and the conman. Commands are outbound
# to the hardware device.
# The parser produces Message instances while the Command base class has a
# virtual function serialize() that all subclasses must implement. The
# serialized representation shall be writable on the control connection's
# socket.
# to make things really funky, the SqueezeBox does not use network order to
# describe integers. It also uses different integer sizes to describe message
# and command lengths. On top of this, JSON messages must use a larger size
# field than SB messages to be useful. Joy...
#
# Device message: size field located [4:8], unsigned long, little endian.
# Device command: size field located [0:2], unsigned short, little endian.
# JSON message: exactly like device message.
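# Illustrative sketch of the framing described above (not part of the
# original wire code): an outbound command prepends a 2-byte size,
#   length = struct.pack('<H', socket.htons(len(cmd + params)))
#   wire_data = length + cmd + params
# while an inbound message header carries a 4-byte size at offset [4:8]:
#   size = socket.ntohl(struct.unpack('<L', head[4:8])[0])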
class Message(object):
head = None # string
class Helo(Message):
head = 'HELO'
def __init__(self, id, revision, mac_addr, uuid, language):
self.id = id # integer
self.revision = revision # integer
self.mac_addr = unicode(mac_addr) # string
self.uuid = unicode(uuid) # string
self.language = unicode(language) # string
def __str__(self):
return 'HELO: %s %d %s %s %s' % (ID.debug[self.id], self.revision,
self.mac_addr, self.uuid, self.language)
class Anic(Message):
head = 'ANIC'
def __str__(self):
return 'ANIC: -'
class Tactile(Message):
head = 'IR '
code = 0 # valid values taken from tactile.IR
stress = 0 # integer
def __init__(self, code, stress=0):
self.code = code
self.stress = stress
def __str__(self):
if self.code > 0:
return 'IR : %s %d' % (IR.codes_debug[self.code], self.stress)
else:
return 'IR R: %s %d' % (IR.codes_debug[-self.code], self.stress)
class Bye(Message):
head = 'BYE '
reason = 0 # integer
def __init__(self, reason):
self.reason = reason
def __str__(self):
if self.reason == 1:
return 'BYE : Player is going out for an upgrade'
return 'BYE : %d' % self.reason
class Stat(Message):
head = 'STAT'
event = None # 4 byte string. this is what SBS sources have to say about
# them: vfdc - vfd received, i2cc - i2c command recevied, STMa - AUTOSTART
# STMc - CONNECT, STMe - ESTABLISH, STMf - CLOSE, STMh - ENDOFHEADERS,
# STMp - PAUSE, STMr - UNPAUSE, STMt - TIMER, STMu - UNDERRUN,
# STMl - FULL (triggers start of synced playback), STMd - DECODE_READY
# (decoder has no more data), STMs - TRACK_STARTED (a new track started
# playing), STMn - NOT_SUPPORTED (decoder does not support the track format)
# STMz - pseudo-status derived from DSCO meaning end-of-stream.
# my understanding is that the STMz is not sent by the device but by the
# SBS to itself when it receives the DiSCOnnect message from the device.
# there are also a couple of undocumented events: STMo - the currently
# playing track is running out, aude - ACK of aude command, audg - ACK of
# audg command, strm - ACK of strm command (but which kind?).
# finally there is the undocumented non-event '\0\0\0\0' which is maybe only
# sent when the device connects to reveal transient state that survived in
# disconnected mode.
crlfs = 0 # uint8 number of cr/lf seen during header parsing
mas_init = 0 # uint8 'm' or 'p'. don't know what it is
mas_mode = 0 # uint8 SBS code comment only says "serdes mode"
in_size = 0 # uint32 size of RX buffer
in_fill = 0 # uint32 RX buffer fill
recv_hi = 0 # uint64, high bits. total bytes received
recv_lo = 0 # uint64, low bits. total bytes received
wifi_pow = 0 # uint16 wifi signal strength
jiffies = 0 # uint32 some sort of time slice indication
out_size = 0 # uint32 output buffer size
out_fill = 0 # uint32 output buffer fullness
seconds = 0 # uint32 elapsed playback seconds
voltage = 0 # uint32 analog output voltage. related to preamp value?
msecs = 0 # uint32 elapsed playback milliseconds
stamp = 0 # uint32 server timestamp used for latency tracking
error = 0 # uint16 only set in STAT/STMn? no SBS documentation
def __str__(self):
tmp1 = ( 'Event = "%s"\n' % self.event
+ 'CRLFs = %d\n' % self.crlfs
+ 'MAS init = %d\n' % self.mas_init
+ 'MAS mode = %d\n' % self.mas_mode
+ 'In buff = %d\n' % self.in_size
+ 'In fill = %d\n' % self.in_fill
+ 'Received = %d %d\n' % (self.recv_hi, self.recv_lo) )
if self.wifi_pow <= 100:
tmp2 = 'WiFi pow = %d\n' % self.wifi_pow
else:
tmp2 = 'Connection = Wired\n'
tmp3 = ( 'Jiffies = %d\n' % self.jiffies
+ 'Out buff = %d\n' % self.out_size
+ 'Out fill = %d\n' % self.out_fill
+ 'Elapsed = %d %d\n' % (self.seconds, self.msecs)
+ 'Voltage = %d\n' % self.voltage
+ 'Stamp = %d\n' % self.stamp
+ 'Error = %d\n' % self.error )
return '%s%s%s' % (tmp1, tmp2, tmp3)
def log(self, level):
if level > 0:
return (
'stat event=%s crlf=%d in-fill=%d rx=%d out-fill=%d'
% (self.event, self.crlfs, self.in_fill,
self.recv_hi << 32 | self.recv_lo, self.out_fill)
)
class Resp(Message):
head = 'RESP'
http_header = None # string
def __init__(self, http_header):
self.http_header = http_header
def __str__(self):
return 'RESP: %s' % self.http_header
class Ureq(Message):
head = 'UREQ'
def __str__(self):
return 'UREQ: -'
class Dsco(Message):
head = 'DSCO'
reason = 0 # uint8
def __init__(self, reason):
self.reason = reason
def __str__(self):
if self.reason == 0:
message = 'Connection closed normally'
elif self.reason == 1:
message = 'Connection reset by local host'
elif self.reason == 2:
message = 'Connection reset by remote host'
elif self.reason == 3:
message = 'Connection is no longer able to work'
elif self.reason == 4:
message = 'Connection timed out'
else:
message = 'Unknown reason (%d)' % self.reason
return 'DSCO: %s' % message
### COMMANDS ###################################################################
class Command(object):
def serialize(self):
raise Exception, 'All Command subclasses must implement serialize()'
class Strm(Command):
# the first couple of sections are just named constants to use in the
# "real" member values.
# operations
OP_START = 's'
OP_PAUSE = 'p'
OP_UNPAUSE = 'u'
OP_STOP = 'q'
OP_FLUSH = 'f'
OP_STATUS = 't'
OP_SKIP = 'a' # skip milliseconds in the output buffer
# autostart? ("extern" as in "extern source". e.g. internet radio.)
AUTOSTART_NO = '0'
AUTOSTART_YES = '1'
AUTOSTART_EXTERN_NO = '2'
AUTOSTART_EXTERN_YES = '3'
# formats
FORMAT_MPEG = 'm'
FORMAT_WAV = 'p' # also for AIF
FORMAT_FLAC = 'f'
FORMAT_WMA = 'w' # also for ASX
FORMAT_OGG = 'o'
# pcm sample sizes
PCM_SIZE_8 = '0'
PCM_SIZE_16 = '1'
PCM_SIZE_24 = '2'
PCM_SIZE_32 = '3'
# pcm KHz sample rates
PCM_RATE_8 = '5'
PCM_RATE_11 = '0'
PCM_RATE_12 = '6'
PCM_RATE_16 = '7'
PCM_RATE_22 = '1'
PCM_RATE_24 = '8'
PCM_RATE_32 = '2'
PCM_RATE_44 = '3' # 44.1, of course
PCM_RATE_48 = '4'
PCM_RATE_96 = '9'
# pcm channels
PCM_MONO = '1'
PCM_STEREO = '2'
# pcm endianness
PCM_BIG_ENDIAN = '0'
PCM_LITTLE_ENDIAN = '1'
# spdif enabled?
SPDIF_AUTO = struct.pack('<B', 0)
SPDIF_ENABLE = struct.pack('<B', 1)
SPDIF_DISABLE = struct.pack('<B', 2)
# fade types
FADE_NONE = '0'
FADE_CROSS = '1'
FADE_IN = '2'
FADE_OUT = '3'
FADE_INOUT = '4'
# other flags
FLAG_LOOP_FOREVER = 0x80 # loop over the buffer content forever
FLAG_DEC_NO_RESTART = 0x40 # don't restart the decoder (when do you?)
FLAG_INVERT_RIGHT = 0x02 # invert polarity, right channel
FLAG_INVERT_LEFT = 0x01 # invert polarity, left channel
# member values to serialize follow:
operation = None
autostart = '?'
format = '?'
pcm_sample_size = '?'
pcm_sample_rate = '?'
pcm_channels = '?'
pcm_endianness = '?'
in_threshold = 0 # KBytes of input data to buffer before autostart
# and/or notifying the server of buffer status
# struct.pack('<B', _)
spdif = SPDIF_DISABLE
fade_time = 0 # seconds to spend on fading between songs
# struct.pack('<B', _)
fade_type = FADE_NONE
flags = 0 # struct.pack('<B', _)
out_threshold = 0 # tenths of seconds of decoded audio to buffer
# before starting playback.
# struct.pack('<B', _)
reserved = struct.pack('<B', 0)
gain = (0,0) # playback gain in 16.16 fixed point
# struct.pack('<HH', htons(_), htons(_))
server_port = 0 # struct.pack('<H', socket.htons(3484))
server_ip = 0 # where to get the data stream (32 bit IPv4 addr).
# zero makes it use the same as the control server.
# struct.pack('<L', htonl(_))
resource = None # string to identify the file/stream on a CM server
seek = 0 # milliseconds
def serialize(self):
cmd = 'strm'
tmp = ( self.operation
+ self.autostart
+ self.format
+ self.pcm_sample_size
+ self.pcm_sample_rate
+ self.pcm_channels
+ self.pcm_endianness
+ struct.pack('<B', self.in_threshold)
+ self.spdif
+ struct.pack('<B', self.fade_time)
+ self.fade_type
+ struct.pack('<B', self.flags)
+ struct.pack('<B', self.out_threshold)
+ self.reserved
+ struct.pack('<HH', socket.htons(self.gain[0]),
socket.htons(self.gain[1]))
+ struct.pack('<H', socket.htons(self.server_port))
+ struct.pack('<L', socket.htonl(self.server_ip)) )
if len(tmp) != 24:
raise Exception, 'strm command not 24 bytes in length'
if self.operation == Strm.OP_START:
s = 'GET %s?seek=%s HTTP/1.0\r\n' % (self.resource, self.seek)
s = s.encode('utf-8')
params = tmp + struct.pack('%ds' % len(s), s)
# SqueezeCenter does this (on the GET, but it's all the same). why?
#if len(params) % 2 != 0:
# params = params + '\n'
else:
params = tmp
length = struct.pack('<H', socket.htons(len(cmd + params)))
return length + cmd + params
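# Example (sketch): StrmPause().serialize() emits the 2-byte length header
# followed by 'strm' and the 24 parameter bytes -- operation 'p' plus the
# placeholder defaults above; no GET request is appended because the
# operation is not OP_START.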
class StrmStart(Strm):
operation = Strm.OP_START
def __init__(self, ip, port, resource, seek=0, background=False):
assert type(ip) == int
assert type(port) == int
self.server_ip = ip
self.server_port = port
self.resource = resource
self.seek = seek
self.out_threshold = 1 # should be enough for low datarate formats
if background:
self.autostart = Strm.AUTOSTART_NO
else:
self.autostart = Strm.AUTOSTART_YES
class StrmStartMpeg(StrmStart):
format = Strm.FORMAT_MPEG
def __init__(self, ip, port, resource, seek=0, background=False):
StrmStart.__init__(self, ip, port, resource, seek, background)
class StrmStartFlac(StrmStart):
format = Strm.FORMAT_FLAC
def __init__(self, ip, port, resource, seek=0, background=False):
StrmStart.__init__(self, ip, port, resource, seek, background)
class StrmPause(Strm):
operation = Strm.OP_PAUSE
class StrmUnpause(Strm):
operation = Strm.OP_UNPAUSE
class StrmStop(Strm):
operation = Strm.OP_STOP
class StrmFlush(Strm):
operation = Strm.OP_FLUSH
class StrmStatus(Strm):
operation = Strm.OP_STATUS
class StrmSkip(Strm):
operation = Strm.OP_SKIP
def __init__(self, msecs):
self.gain = (0, msecs) # there are many uses for this field..
class Grfe(Command):
offset = 0 # only non-zero for the Transporter
transition = None # char
distance = 32 # transition start on the Y-axis. not well understood
bitmap = None # 4 * 320 chars for an SB2/3 display
def serialize(self):
cmd = 'grfe'
params = ( struct.pack('<H', socket.htons(self.offset))
+ self.transition
+ struct.pack('<B', self.distance)
+ self.bitmap )
length = struct.pack('<H', socket.htons(len(cmd + params)))
return length + cmd + params
class Grfb(Command):
brightness = None # uint16
def serialize(self):
cmd = 'grfb'
params = struct.pack('<H', socket.htons(self.brightness))
length = struct.pack('<H', socket.htons(len(cmd + params)))
return length + cmd + params
class Aude(Command):
# what to enable/disable? true/false
analog = True
digital = True
def __init__(self, analog, digital):
assert type(analog) == type(digital) == bool
self.analog = analog
self.digital = digital
def serialize(self):
cmd = 'aude'
params = ( struct.pack('<B', self.analog)
+ struct.pack('<B', self.digital) )
length = struct.pack('<H', socket.htons(len(cmd + params)))
return length + cmd + params
class Audg(Command):
# gain is represented as (16bit,16bit) fixed point floats. in practice it
# is easier to calculate them as long integers and send them in network
# order, instead of as 4 shorts in small endian order.
# dvc (digital volume control?) is boolean
# preamp must fit in a uint8
# legacy is the old-style gain control. not used, send junk
left = 0
right = 0
dvc = False
preamp = 255 # default to maximum
legacy = struct.pack('<LL', 0, 0)
def __init__(self, dvc, preamp, vol_l, vol_r):
vol_l = min(max(vol_l, 0), 100)
vol_r = min(max(vol_r, 0), 100)
self.dvc = dvc
self.preamp = preamp
self.left = self.volume2gain(vol_l)
self.right = self.volume2gain(vol_r)
def volume2gain(self, volume):
db = (volume - 100) / 2.0
multiplier = math.pow(10.0, db / 20.0)
if db >= -30.0 and db <= 0.0:
gain = int(multiplier * 256.0 + 0.5) << 8
else:
gain = int(multiplier * 65536.0 + 0.5)
return gain
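# Worked example (sketch): volume 100 -> db = 0.0 -> multiplier = 1.0;
# -30.0 <= 0.0 <= 0.0, so gain = int(1.0 * 256.0 + 0.5) << 8 = 0x10000,
# i.e. 1.0 in 16.16 fixed point. Volume 0 gives db = -50.0, outside that
# range, so gain = int(10.0**(-2.5) * 65536.0 + 0.5) = 207.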
def serialize(self):
# note that the packing order of the left/right fields really ARE
# big-endian. it's not a mistake!
cmd = 'audg'
params = ( self.legacy
+ struct.pack('<BB', self.dvc, self.preamp)
+ struct.pack('>LL', self.left, self.right) )
length = struct.pack('<H', socket.htons(len(cmd + params)))
return length + cmd + params
class Updn(Command):
def serialize(self):
cmd = 'updn'
params = ' '
length = struct.pack('<H', socket.htons(len(cmd + params)))
return length + cmd + params
class Visu(Command):
# kinds
NONE = 0
VUMETER = 1
SPECTRUM = 2
WAVEFORM = 3 # no documentation or example code available anywhere
# channels
STEREO = 0
MONO = 1
def __eq__(self, other):
if not other:
return False
return type(self) == type(other)
def __ne__(self, other):
return not self.__eq__(other)
def serialize(self):
raise Exception, 'Visu must be subclassed'
class VisuNone(Visu):
def serialize(self):
cmd = 'visu'
params = ( struct.pack('<B', Visu.NONE)
+ struct.pack('<B', 0) )
length = struct.pack('<H', socket.htons(len(cmd + params)))
return length + cmd + params
class VisuMeter(Visu):
# style
DIGITAL = 0
ANALOG = 1
#number of parameters
PARAMETERS = 6
# member values
channels = Visu.STEREO
style = DIGITAL
left_pos = 0
left_width = 0
right_pos = 0
right_width = 0
def __init__(self, left_pos=280,left_width=18,right_pos=302,right_width=18):
self.left_pos = left_pos
self.left_width = left_width
self.right_pos = right_pos
self.right_width = right_width
def serialize(self):
cmd = 'visu'
params = ( struct.pack('<B', Visu.VUMETER)
+ struct.pack('<B', self.PARAMETERS)
+ struct.pack('<l', socket.htonl(self.channels))
+ struct.pack('<l', socket.htonl(self.style))
+ struct.pack('<l', socket.htonl(self.left_pos))
+ struct.pack('<l', socket.htonl(self.left_width))
+ struct.pack('<l', socket.htonl(self.right_pos))
+ struct.pack('<l', socket.htonl(self.right_width)) )
length = struct.pack('<h', socket.htons(len(cmd + params)))
return length + cmd + params
class VisuSpectrum(Visu):
# bandwidth
HIGH_BANDWIDTH = 0 # 0..22050Hz
LOW_BANDWIDTH = 1 # 0..11025Hz
# orientation
LEFT_TO_RIGHT = 0
RIGHT_TO_LEFT = 1
# clipping
CLIP_NOTHING = 0 # show all subbands
CLIP_HIGH = 1 # clip higher subbands
# bar intensity
MILD = 1
MEDIUM = 2
HOT = 3
PARAMETERS = 19
# member values
channels = Visu.STEREO
bandwidth = HIGH_BANDWIDTH
preemphasis = 0x10000 # dB per KHz
left_pos = 0
left_width = 160
left_orientation = LEFT_TO_RIGHT
left_bar_width = 4
left_bar_spacing = 1
left_clipping = CLIP_HIGH
left_bar_intensity = MILD
left_cap_intensity = HOT
right_pos = 160
right_width = 160
right_orientation = RIGHT_TO_LEFT
right_bar_width = 4
right_bar_spacing = 1
right_clipping = CLIP_HIGH
right_bar_intensity = MILD
right_cap_intensity = HOT
def serialize(self):
cmd = 'visu'
params = ( struct.pack('<B', Visu.SPECTRUM)
+ struct.pack('<B', self.PARAMETERS)
+ struct.pack('<l', socket.htonl(self.channels))
+ struct.pack('<l', socket.htonl(self.bandwidth))
+ struct.pack('<l', socket.htonl(self.preemphasis))
+ struct.pack('<l', socket.htonl(self.left_pos))
+ struct.pack('<l', socket.htonl(self.left_width))
+ struct.pack('<l', socket.htonl(self.left_orientation))
+ struct.pack('<l', socket.htonl(self.left_bar_width))
+ struct.pack('<l', socket.htonl(self.left_bar_spacing))
+ struct.pack('<l', socket.htonl(self.left_clipping))
+ struct.pack('<l', socket.htonl(self.left_bar_intensity))
+ struct.pack('<l', socket.htonl(self.left_cap_intensity))
+ struct.pack('<l', socket.htonl(self.right_pos))
+ struct.pack('<l', socket.htonl(self.right_width))
+ struct.pack('<l', socket.htonl(self.right_orientation))
+ struct.pack('<l', socket.htonl(self.right_bar_width))
+ struct.pack('<l', socket.htonl(self.right_bar_spacing))
+ struct.pack('<l', socket.htonl(self.right_clipping))
+ struct.pack('<l', socket.htonl(self.right_bar_intensity))
+ struct.pack('<l', socket.htonl(self.right_cap_intensity)) )
length = struct.pack('<h', socket.htons(len(cmd + params)))
return length + cmd + params
class Ping(Command):
# there is no command to explicitly poll a device for liveness, but the
# 'stat' command works fine for this purpose. will receive back a STAT
# message with .event=='stat'.
def serialize(self):
cmd = 'stat'
params = ''
length = struct.pack('<H', socket.htons(len(cmd + params)))
return length + cmd + params
# JSON based messages. Note that there is no Command class for JSON messaging.
# all communication is done with a common tree of message classes.
# guid bookkeeping used by JsonMessage below. make_json_guid() and json_guids
# are referenced a few lines down but defined nowhere else in this file, so
# minimal definitions are sketched here (assumption: guids only need to be
# unique within one process).
json_guids = {}
def make_json_guid():
return max(json_guids.keys() or [0]) + 1
class JsonMessage(Message):
head = 'JSON'
guid = 0 # integer to tie results to method calls
wire = None # back reference so that replies can easily be sent back
def __init__(self, guid):
assert type(guid) == int
if guid < 0:
guid = make_json_guid()
json_guids[guid] = self
self.guid = guid
def __str__(self):
return unicode(self.dump())
def dump(self):
return { 'guid': self.guid }
def serialize(self):
data = json.dumps(self.dump())
length = struct.pack('<L', socket.htonl(len(data)))
return self.head + length + data
def respond(self, errno, errstr, chunk, more, result):
if self.wire:
msg = JsonResult(self.guid, errno, errstr, chunk, more, result)
self.wire.send(msg.serialize())
class JsonCall(JsonMessage):
method = None # unicode string
params = None # JSON compatible dictionary
def __init__(self, guid, method, params):
JsonMessage.__init__(self, guid)
assert type(method) == unicode
assert type(params) == dict
self.method = method
self.params = params
def __getattr__(self, name):
if name in self.params:
return self.params[name]
else:
raise AttributeError(name)
def dump(self):
r = JsonMessage.dump(self)
r.update({
'method': self.method,
'params': self.params
})
return r
# this command is used by a content manager to hail a device manager. There
# is no reply message class.
class Hail(JsonCall):
def __init__(self, guid, label, stream_ip, stream_port):
assert type(label) == unicode
assert type(stream_ip) == int
assert type(stream_port) == int
params = {
'label' : label,
'stream_ip' : stream_ip,
'stream_port': stream_port
}
JsonCall.__init__(self, guid, u'hail', params)
# used by device manager to ask content manager for a listing of the contents
# of some item by GUID. use JsonResult to reply.
class Ls(JsonCall):
def __init__(self, guid, item, recursive=False, parent=False):
assert type(item) == unicode
assert type(recursive) == bool
assert type(parent) == bool
params = {
'item' : item,
'recursive': recursive,
'parent' : parent
}
JsonCall.__init__(self, guid, u'ls', params)
# used by content managers to send available search terms to the device
# manager. there is no reply message class.
class Terms(JsonCall):
sender = None
def __init__(self, guid, terms):
assert type(terms) == list
JsonCall.__init__(self, guid, u'terms', { 'terms': terms })
class Play(JsonCall):
def __init__(
self, guid, url, seek=0, kind=None, pretty=None, size=None,
duration=None
):
assert type(url) == unicode
assert type(seek) == int
assert (not kind) or type(kind) == unicode
assert (not pretty) or type(pretty) == dict
assert (not size) or type(size) == int
assert (not duration) or type(duration) == int
params = {
'url' : url,
'seek' : seek,
'kind' : kind,
'pretty' : pretty,
'size' : size,
'duration': duration
}
JsonCall.__init__(self, guid, u'play', params)
class Add(JsonCall):
def __init__(
self, guid, url, kind=None, pretty=None, size=None, duration=None
):
assert type(url) == unicode
assert (not kind) or type(kind) == unicode
assert (not pretty) or type(pretty) == dict
assert (not size) or type(size) == int
assert (not duration) or type(duration) == int
if pretty and 'label' in pretty:
assert type(pretty['label']) == unicode
params = {
'url' : url,
'kind' : kind,
'pretty' : pretty,
'size' : size,
'duration': duration
}
JsonCall.__init__(self, guid, u'add', params)
class GetItem(JsonCall):
def __init__(self, guid, item):
assert type(item) == unicode
JsonCall.__init__(self, guid, u'get_item', { 'item': item })
class GetTerms(JsonCall):
def __init__(self, guid):
JsonCall.__init__(self, guid, u'get_terms', {})
class Search(JsonCall):
terms = None
def __init__(self, guid, terms):
assert type(terms) == list
JsonCall.__init__(self, guid, u'search', { 'terms': terms })
self.terms = terms
class JsonResult(JsonMessage):
def __init__(self, guid, errno, errstr, chunk, more, result):
JsonMessage.__init__(self, guid)
assert type(errno) == int
assert type(errstr) == unicode
assert type(chunk) == int
assert type(more) == bool
# no type checking done on result. can be any JSON compatible object.
self.errno = errno
self.errstr = errstr
self.chunk = chunk
self.more = more
self.result = result
def dump(self):
r = JsonMessage.dump(self)
r.update({
'method': u'result',
'errno' : self.errno,
'errstr': self.errstr,
'chunk' : self.chunk,
'more' : self.more,
'result': self.result
})
return r
def parse_json(data):
body = json.loads(data)
method = body['method']
guid = body['guid']
if method == u'result':
del body['method']
return JsonResult(**body)
else:
params = body['params']
if method == u'hail':
return Hail(guid, **params)
if method == u'ls':
return Ls(guid, **params)
if method == u'terms':
return Terms(guid, **params)
if method == u'play':
return Play(guid, **params)
if method == u'add':
return Add(guid, **params)
if method == u'get_item':
return GetItem(guid, **params)
if method == u'search':
return Search(guid, **params)
if method == u'get_terms':
return GetTerms(guid, **params)
return None
# only used to debug malformed messages
def parsable(data):
kind = data[0:4]
if kind not in ['HELO', 'ANIC', 'IR  ', 'BYE!', 'STAT', 'RESP', 'UREQ',
'JSON', 'DSCO']:
return False
blen = socket.ntohl(struct.unpack('<L', data[4:8])[0])
if blen > len(data) - 8:
return False
return True
def human_readable(data):
buf = ''
for i in range(len(data)):
if ((ord(data[i]) >= 65 and ord(data[i]) <= 90)
or (ord(data[i]) >= 97 and ord(data[i]) <= 122)
or (ord(data[i]) in [32, 45, 46, 47, 58, 95])):
buf = buf + '%c' % data[i]
else:
buf = buf + '\\%03d' % ord(data[i])
return buf
def first_unprintable(data):
for i in range(len(data)):
if ((ord(data[i]) not in [9, 10, 13])
and (ord(data[i]) < 32 or ord(data[i]) > 126)):
return i
return len(data)
def parse_header(head):
try:
kind = head[0:4]
if kind not in ['HELO', 'ANIC', 'IR ', 'BYE!', 'STAT', 'RESP',
'UREQ', 'JSON', 'DSCO']:
#print('ERROR: unknown header kind %s' % kind)
return (None, 0)
size = socket.ntohl(struct.unpack('<L', head[4:8])[0])
return (kind, size)
except Exception, e:
print e
return (None, 0)
def parse_body(kind, size, body):
if kind == 'HELO':
if size == 10:
msg = parse_helo_10(body, size)
elif size == 36:
msg = parse_helo_36(body, size)
else:
msg = None
return msg
if kind == 'ANIC':
return Anic()
if kind == 'IR ':
return parse_ir(body, size)
if kind == 'BYE!':
return parse_bye(body, size)
if kind == 'STAT':
return parse_stat(body, size)
if kind == 'RESP':
return parse_resp(body, size)
if kind == 'UREQ':
return parse_ureq(body, size)
if kind == 'JSON':
return parse_json(body)
if kind == 'DSCO':
return parse_dsco(body, size)
print('unknown message, len %d. first 160 chars:' % size)
print(human_readable(body[:160]))
#sys.exit(1)
# look for next message in the mess:
#for i in range(len(data) - 4):
# if parsable(data[i:]):
# print('Recovered parsable message')
# return (None, data[i:])
return None
def parse_helo_10(data, dlen):
id = ord(data[0])
revision = ord(data[1])
tmp = struct.unpack('<6BH', data[2:])
mac_addr = tuple(tmp[0:6])
wlan_chn = socket.ntohs(tmp[6])
mac_addr = '%02x:%02x:%02x:%02x:%02x:%02x' % mac_addr
return Helo(id, revision, mac_addr, 1234, 'EN')
def parse_helo_36(data, dlen):
id = ord(data[0])
revision = ord(data[1])
tmp = struct.unpack('<6B16BHLL2s', data[2:])
mac_addr = tuple(tmp[0:6])
# why not just cook a new device number?
if id == ID.SQUEEZEBOX2 and mac_addr[0:3] == (0x0,0x4,0x20):
id = ID.SQUEEZEBOX3
uuid = ''.join(str(i) for i in tmp[6:22])
wlan_chn = socket.ntohs(tmp[22])
recv_hi = socket.ntohl(tmp[23])
recv_lo = socket.ntohl(tmp[24])
language = tmp[25]
mac_addr = '%02x:%02x:%02x:%02x:%02x:%02x' % mac_addr
return Helo(id, revision, mac_addr, uuid, language)
last_ir = None # tuple: (IR code, time stamp, stress)
def parse_ir(data, dlen):
global last_ir
stamp = socket.ntohl(struct.unpack('<L', data[0:4])[0])
format = struct.unpack('<B', data[4:5])[0]
nr_bits = struct.unpack('<B', data[5:6])[0]
code = socket.ntohl(struct.unpack('<L', data[6:10])[0])
if code not in IR.codes_debug:
print('stamp %d' % stamp)
print('format %d' % format)
print('nr bits %d' % nr_bits)
print('UNKNOWN ir code %d' % code)
last_ir = None
return None
stress = 0
if last_ir and last_ir[0] == code:
# the same key was pressed again. if it was done fast enough,
# then we *guess* that the user is keeping it pressed, rather
# than hitting it again real fast. unfortunately the remote
# doesn't generate key release events.
#print('Stamp %d, diff %d' % (stamp, stamp - last_ir[1]))
if stamp - last_ir[1] < 130: # milliseconds
# the threshold can't be set below 108 which seems to be the
# rate at which the SB3 generates remote events. at the same
# time it is quite impossible to manually hit keys faster
# than once per 140ms, so 130ms should be a good threshold.
stress = last_ir[2] + 1
else:
stress = 0
last_ir = (code, stamp, stress)
return Tactile(code, stress)
def parse_bye(data, dlen):
reason = struct.unpack('<B', data[0])[0]
return Bye(reason)
def parse_stat(data, dlen):
stat = Stat()
stat.event = data[0:4]
stat.crlfs = struct.unpack('<B', data[4])[0]
stat.mas_init = struct.unpack('<B', data[5])[0]
stat.mas_mode = struct.unpack('<B', data[6])[0]
stat.in_size = socket.ntohl(struct.unpack('<L', data[ 7:11])[0])
stat.in_fill = socket.ntohl(struct.unpack('<L', data[11:15])[0])
stat.recv_hi = socket.ntohl(struct.unpack('<L', data[15:19])[0])
stat.recv_lo = socket.ntohl(struct.unpack('<L', data[19:23])[0])
stat.wifi_pow = socket.ntohs(struct.unpack('<H', data[23:25])[0])
stat.jiffies = socket.ntohl(struct.unpack('<L', data[25:29])[0])
stat.out_size = socket.ntohl(struct.unpack('<L', data[29:33])[0])
stat.out_fill = socket.ntohl(struct.unpack('<L', data[33:37])[0])
stat.seconds = socket.ntohl(struct.unpack('<L', data[37:41])[0])
stat.voltage = socket.ntohs(struct.unpack('<H', data[41:43])[0])
stat.msecs = socket.ntohl(struct.unpack('<L', data[43:47])[0])
stat.stamp = socket.ntohl(struct.unpack('<L', data[47:51])[0])
stat.error = socket.ntohs(struct.unpack('<H', data[51:53])[0])
return stat
def parse_resp(data, dlen):
# data is always an HTTP header. In fact the very same one we sent
# on the streaming socket, unless the device is streaming from some
# other source.
return Resp(data)
def parse_ureq(data, dlen):
return Ureq()
def parse_dsco(data, dlen):
reason = struct.unpack('<B', data[0])[0]
return Dsco(reason)
|
Mysingen/dwite
|
protocol.py
|
Python
|
gpl-3.0
| 30,893
|
# This file is part of Boomer Core.
#
# Boomer Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Boomer Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Boomer Core. If not, see <http://www.gnu.org/licenses/>.
#
# Forked from Mycroft Core on 2017-07-29
import os
from os.path import join, expanduser, isdir
__author__ = 'jdorleans'
class FileSystemAccess(object):
"""
A class for providing access to the boomer FS sandbox. Intended to be
attached to skills
at initialization time to provide a skill-specific namespace.
"""
def __init__(self, path):
self.path = self.__init_path(path)
@staticmethod
def __init_path(path):
if not isinstance(path, str) or len(path) == 0:
raise ValueError("path must be initialized as a non empty string")
path = join(expanduser('~'), '.boomer', path)
if not isdir(path):
os.makedirs(path)
return path
def open(self, filename, mode):
"""
Get a handle to a file (with the provided mode) within the
skill-specific namespace.
:param filename: a str representing a path relative to the namespace.
subdirs not currently supported.
:param mode: a file handle mode
:return: an open file handle.
"""
file_path = join(self.path, filename)
return open(file_path, mode)
def exists(self, filename):
return os.path.exists(join(self.path, filename))
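# Usage sketch (hypothetical skill namespace 'notes'; creates ~/.boomer/notes
# on first use):
#   fs = FileSystemAccess('notes')
#   with fs.open('todo.txt', 'w') as f:
#       f.write('remember the milk')
#   fs.exists('todo.txt')  # -> True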
|
clusterfudge/boomer
|
boomer/filesystem/__init__.py
|
Python
|
gpl-3.0
| 1,932
|
import os
from common_helper_files import get_dir_of_file
from pluginbase import PluginBase
import logging
class FilterSystem():
FILTER_TYPE = None
def __init__(self, selected_filters):
self._init_plugins()
if selected_filters == 'all':
self._set_all_filters()
else:
self._set_filters_to_apply(selected_filters)
self._setup_counters()
def _init_plugins(self):
self.plugin_base = PluginBase(package='filter_plugins.{}'.format(self.FILTER_TYPE))
self.filter_plugins = dict()
self.plugin_source = self.plugin_base.make_plugin_source(searchpath=[os.path.join(get_dir_of_file(__file__), '../filter_plugins/{}'.format(self.FILTER_TYPE))])
plugin_list = self.plugin_source.list_plugins()
for item in plugin_list:
plugin = self.plugin_source.load_plugin(item)
plugin.setup(self)
def register_plugin(self, name, filter_function):
self.filter_plugins[name] = filter_function
def _set_all_filters(self):
self.filters_to_apply = list(self.filter_plugins.keys())
def _setup_counters(self):
self.counter = dict()
for item in self.filters_to_apply:
self.counter[item] = 0
def _set_filters_to_apply(self, filter_list):
self.filters_to_apply = list()
for item in filter_list:
if item in self.filter_plugins:
self.filters_to_apply.append(item)
else:
logging.error('Filter "{}" is not available!'.format(item))
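# Minimal plugin sketch (assumed location: filter_plugins/<FILTER_TYPE>/
# example.py). Each plugin module must expose setup(), which registers a
# filter function under the name that may appear in selected_filters:
#
#   def my_filter(item):
#       return True  # hypothetical filter logic
#
#   def setup(filter_system):
#       filter_system.register_plugin('my_filter', my_filter)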
|
weidenba/recovery_sort
|
filter_system/base.py
|
Python
|
gpl-3.0
| 1,565
|
#!/bin/env python
# -*- coding: utf-8; -*-
#
# (c) 2016 FABtotum, http://www.fabtotum.com
#
# This file is part of FABUI.
#
# FABUI is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# FABUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FABUI. If not, see <http://www.gnu.org/licenses/>.
# Import standard python module
# Import external modules
# Import internal modules
|
infinity0n3/fabtotum-experiments
|
fabtotum/fabui/database.py
|
Python
|
gpl-3.0
| 825
|
#!/usr/bin/env python
from __future__ import division
import numpy as np
import pylab
def PCA(Z, verbose = 0):
"""
PCA Mc Vean style
Parameters
----------
Z : (L, n) array
SNP array matrix. Matrix of 0s and 1s that represent the two alleles.
Returns
-------
Y : (n, p) array
Configuration matrix. Each column represents a dimension. Only the
p dimensions corresponding to positive eigenvalues of M are returned.
Note that each dimension is only determined up to an overall sign,
corresponding to a reflection.
dimension 0: Y[:,0]
dimension 1: Y[:,1]
dimension 2: Y[:,2]
...
e : (n,) array
Eigenvalues of M.
"""
# Number of SNPs
L = len(Z)
if verbose: print "number of SNPs: ",L
# Center the data: remove the average of each row
X = Z-np.average(Z,axis=1).reshape(L,1)
# matrix to diagonalise
M = 1./L*(X.T.dot(X))
# Diagonalize
evals, evecs = np.linalg.eigh(M)
# Sort by eigenvalue in descending order
idx = np.argsort(evals)[::-1]
evals = evals[idx]
evecs = evecs[:,idx]
# Compute the coordinates using positive-eigenvalued components only
w, = np.where(evals > 0)
S = np.diag(np.sqrt(evals[w])) # diagonal matrix of sqrt-eigenvalues (distinct from the SNP count L above)
V = evecs[:,w]
Y = V.dot(S)
return evals,evecs,Y
'''
Data = np.array([[0,1],[1,0],[0,0]])
evals, evecs, Y = PCA(Data)
print "evals ", evals
print "coords ",Y
print "coords ",Y[0,:]
pylab.plot(Y[:,0],'o')
pylab.show()
'''
|
sapfo/medeas
|
src/old_scripts/python_pca.py
|
Python
|
gpl-3.0
| 3,142
|
"""
Created on 11.09.2014
@author: benjamin@boerngen-schmidt.de
"""
from abc import ABCMeta, abstractmethod
import random
import datetime
class BaseCar(metaclass=ABCMeta):
"""
Represents the fundamentals of a car
"""
def __init__(self, env, tank_size):
"""
Constructor
:type tank_size: int
:type env: simulation.environment.SimulationEnvironment
"""
env.car = self
self.env = env
self._tankSize = float(tank_size)
self._tankFilling = BaseCar._random_tank_filling(self._tankSize)
self._current_position = None
self._fuel_type = 'e5'
self._driven_distance = float(0)
# self.log = logging.getLogger('spritsim.Car' + commuter_id)
@staticmethod
def _random_tank_filling(maximum):
"""
Returns a random tank filling in litre
Method for initializing a car with a random tank filling between 10 and maximum litres
:param maximum: maximum tank capacity
:return: A random filling
:rtype: float
"""
return random.uniform(10, maximum)
@property
def current_position(self):
"""Returns the nodes target ID
:rtype: int
"""
return self._current_position
@property
def driven_distance(self):
"""
The car's odometer
:return: The total distance the car has traveled
:rtype: float
"""
return self._driven_distance
@property
def fuel_type(self):
"""
The car's fuel type
:return: Type of fuel (e5|diesel)
:rtype: str
"""
return self._fuel_type
@property
def tank_size(self):
"""
:return: Size of the car's tank in litre
:rtype: float
"""
return self._tankSize
@property
def current_filling(self):
"""
:return: Current filling of the car's tank
:rtype: float
"""
return self._tankFilling
def consume_fuel(self, speed, distance, road_type):
"""
:param int speed: Maximum allowed speed
:param float distance: Length of the segment
:param simulation.routing.route.RouteClazz road_type: The type of the road
:return:
"""
self._tankFilling -= self.consumption_per_km * distance
@property
@abstractmethod
def consumption_per_km(self):
"""
:return: The fuel consumption of the car per km
:rtype: float
"""
pass
@property
def km_left(self):
"""
Returns the remaining km the car can drive
:return: Distance car is able to drive
:rtype: float
"""
return self.current_filling / self.consumption_per_km
def refilled(self):
"""Car has been refilled at a filling station"""
self._tankFilling = self._tankSize
def drive(self, ignore_refill_warning=False):
"""Lets the car drive the given route
On arrival at the destination a CommuterAction for the route is returned, or, if the car needs
refilling, the action to search for a refilling station is returned instead.
:param ignore_refill_warning: Tells the function not to raise a RefillWarning (default: False)
:type ignore_refill_warning: bool
:raises RefillWarning: If the tank filling is less or equal 5.0 liter
"""
for segment in self.env.route:
self._do_driving(segment)
self.env.consume_time(datetime.timedelta(seconds=segment.travel_time))
# check if driving the segment has drained the tank below the refill threshold
if self._tankFilling <= 5.0 and not ignore_refill_warning:
raise RefillWarning()
def _do_driving(self, segment):
"""
Drives the given route segment
Uses the segment data to simulate the driving of the car. Thereby fuel is consumed to the amount calculated
by the consume_fuel method.
:param segment: a single fragment of the route
:type segment: simulation.routing.route.RouteFragment
"""
self.consume_fuel(segment.speed_limit, segment.length, segment.road_type)
self._driven_distance += segment.length
self._current_position = segment.target
class PetrolCar(BaseCar):
def __init__(self, env):
super().__init__(env, 50)
self._fuel_type = 'e5'
@property
def consumption_per_km(self):
"""
Consumes standard of 10 Liter per 100km, an equivalent of 0.1 L/km
:return: fuel consumption per 1 km in liter
:rtype: float
"""
return 0.1
class DieselCar(BaseCar):
def __init__(self, env):
super().__init__(env, 50)
self._fuel_type = 'diesel'
@property
def consumption_per_km(self):
"""
Consumes standard of 8 litre per 100km, an equivalent of 0.08 L/km
:return: fuel consumption per 1 km in liter
:rtype: float
"""
return 0.08
class RefillWarning(Exception):
pass
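# Quick check of the consumption model (sketch): a PetrolCar covering a
# 25 km segment consumes 25 * 0.1 = 2.5 L, so a full 50 L tank yields
# km_left = 50 / 0.1 = 500 km; the DieselCar's 0.08 L/km stretches the
# same tank to 625 km.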
|
boerngen-schmidt/commuter-simulation
|
code/simulation/car.py
|
Python
|
gpl-3.0
| 5,072
|
## Copyright 2009 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
import os
def again(request,*args,**kw):
get = request.GET.copy()
for k,v in kw.items():
if v is None: # value None means "remove this key"
if get.has_key(k):
del get[k]
else:
get[k] = v
path = request.path
if len(args):
path += "/" + "/".join(args)
path = os.path.normpath(path)
path = path.replace("\\","/")
s = get.urlencode()
if len(s):
path += "?" + s
#print pth
return path
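# Usage sketch (hypothetical request whose path is '/contacts' and whose
# query string is '?page=2'):
#   again(request, 'detail', page=None, sort='name')
#   # -> '/contacts/detail?sort=name' (page removed, sort added)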
def get_redirect(request):
if hasattr(request,"redirect_to"):
return request.redirect_to
def redirect_to(request,url):
request.redirect_to = url
#~ def is_editing(request):
#~ editing = request.GET.get("editing",None)
#~ if editing is None:
#~ path = request.session.get("editing",None)
#~ else:
#~ editing = int(editing)
#~ if editing:
#~ request.session["editing"] = path = request.path
#~ else:
#~ request.session["editing"] = path = None
#~ if request.path == path:
#~ return True
#~ request.session["editing"] = None
#~ return False
#~ def stop_editing(request):
#~ request.session["editing"] = None
#~ def start_editing(request):
#~ request.session["editing"] = request.path
|
MaxTyutyunnikov/lino
|
lino/utils/requests.py
|
Python
|
gpl-3.0
| 1,997
|
import AI.pad
import AI.state
import AI.state_manager
class Character:
def __init__(self, pad_path):
self.action_list = []
self.last_action = 0
self.pad = AI.pad.Pad(pad_path)
self.state = AI.state.State()
#Set False to enable character selection
self.test_mode = True
self.sm = AI.state_manager.StateManager(self.state, self.test_mode)
#test_mode = False, Selects character each run
def make_action(self, mm):
if self.state.menu == AI.state.Menu.Game:
self.advance()
elif self.state.menu == AI.state.Menu.Characters:
mm.pick_fox(self.state, self.pad)
elif self.state.menu == AI.state.Menu.Stages:
self.pad.tilt_stick(AI.pad.Stick.C, 0.5, 0.5)
elif self.state.menu == AI.state.Menu.PostGame:
mm.press_start_lots(self.state, self.pad)
#test_mode = True, AI starts fighting each run, saves time during testing
def make_action_test(self, mm):
if self.state.menu == AI.state.Menu.Game:
self.advance()
elif self.state.menu == AI.state.Menu.PostGame:
mm.press_start_lots(self.state, self.pad)
#implemented by each character to decide what to do
#includes some states where each character will respond the same
def logic(self):
if AI.state.is_spawning(self.state.players[2].action_state):
self.tilt_stick(60, 'DOWN')
self.tilt_stick(3, None)
#compare AI's current state
def compare_AI_state(self, test_state):
return self.state.players[2].action_state is test_state
#compare P1 current state
def compare_P1_state(self, test_state):
return self.state.players[0].action_state is test_state
#executes button presses defined in action_list, runs logic() once list is empty
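# Each queued entry is a (frames_to_wait, bound_method, args) tuple, e.g.
# (wait, self.pad.press_button, [button]); an entry fires only once `wait`
# frames have elapsed since the previous action.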
def advance(self):
while self.action_list:
wait, func, args = self.action_list[0]
if self.state.frame - self.last_action < wait:
return
else:
self.action_list.pop(0)
if func is not None:
func(*args)
self.last_action = self.state.frame
else:
self.logic()
'''Methods simulate controller input; appends necessary tuple to action_list'''
def press_button(self, wait, button):
self.action_list.append((wait, self.pad.press_button, [button]))
def release_button(self, wait, button):
self.action_list.append((wait, self.pad.release_button, [button]))
def tilt_stick(self, wait, direction):
if direction == 'UP':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 1.0]))
elif direction == 'DOWN':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 0.0]))
elif direction == 'DOWN_LEFT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.25, 0.25]))
elif direction == 'DOWN_RIGHT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.75, 0.25]))
elif direction == 'RIGHT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 1.0, 0.5]))
elif direction == 'LEFT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.0, 0.5]))
elif direction is None:
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 0.5]))
def tilt_c_stick(self, wait, direction):
if direction == 'UP':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 1.0]))
elif direction == 'DOWN':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 0.0]))
elif direction == 'RIGHT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 1.0, 0.5]))
elif direction == 'LEFT':
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.0, 0.5]))
elif direction is None:
self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 0.5]))
def press_trigger(self, wait, amount):
self.action_list.append((wait, self.pad.press_trigger, [AI.pad.Trigger.L, amount]))
def wait(self, wait):
self.action_list.append((wait, None, []))
'''Execute actions shared among all characters'''
def style(self, wait):
pass
def side_b(self, wait):
self.tilt_stick(wait, 'RIGHT')
self.press_button(1, AI.pad.Button.B)
self.release_button(2, AI.pad.Button.B)
self.tilt_stick(2, None)
def shield(self, wait, length):
self.press_trigger(wait, 0.3)
self.press_trigger(length, 0.0)
def dashdance(self, wait, length):
self.wait(wait)
for _ in range(length):
self.tilt_stick(4, 'LEFT')
self.tilt_stick(4, 'RIGHT')
self.tilt_stick(1, None)
def shorthop(self, wait):
self.press_button(wait, AI.pad.Button.X)
self.release_button(1, AI.pad.Button.X)
'''Execute similar actions that is dependent on character frame data'''
def wavedash(self, wait, direction, wait_airdodge):
self.tilt_stick(wait, direction)
self.shorthop(1)
self.press_button(wait_airdodge, AI.pad.Button.L)
self.release_button(2, AI.pad.Button.L)
self.tilt_stick(1, None)
def shorthop_nair(self, wait, wait_attack, wait_ff):
self.shorthop(wait)
self.press_button(wait_attack, AI.pad.Button.A)
self.release_button(1, AI.pad.Button.A)
self.tilt_stick(wait_ff, 'DOWN')
self.tilt_stick(3, None)
self.press_trigger(2, 0.5)
self.press_trigger(1, 0.0)
|
alex-zoltowski/SSBM-AI
|
AI/Characters/character.py
|
Python
|
gpl-3.0
| 5,818
|
# coding = utf-8
"""
3.8 将 KMeans 用于离群点检测
http://git.oschina.net/wizardforcel/sklearn-cb/blob/master/3.md
"""
# 生成 100 个点的单个数据块,然后识别 5 个离形心最远的点
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
(x, labels) = make_blobs(100, centers=1)
kms = KMeans(n_clusters=1)
kms.fit(x)
# identify the 5 farthest points
dist = kms.transform(x)
sortedIdx = np.argsort(dist.ravel())[::-1][:5]
# remove those points
nx = np.delete(x, sortedIdx, axis=0)
# the centroid position has shifted
nkms = KMeans(n_clusters=1)
nkms.fit(nx)
from matplotlib import pyplot as plt
plt.style.use("ggplot")
(fig, ax) = plt.subplots(figsize=(6, 5))
ax.scatter(x[:, 0], x[:, 1], s=10, label="points")
ax.scatter(kms.cluster_centers_[:, 0], kms.cluster_centers_[:, 1], label="centroid", s=50, alpha=0.7)
ax.scatter(x[sortedIdx][:, 0], x[sortedIdx][:, 1], label="extreme values", s=100, alpha=0.7)
ax.scatter(nkms.cluster_centers_[:, 0], nkms.cluster_centers_[:, 1], label="new centroid", s=50, alpha=0.7)
ax.set_title("single cluster of points")
ax.legend(loc="best")
fig.tight_layout()
fig.show()
plt.show()
|
Ginkgo-Biloba/Misc-Python
|
sklearn/SKLearn3KMOutlier.py
|
Python
|
gpl-3.0
| 1,135
|
import sys
sys.path = ['.'] + sys.path
from test.test_support import verbose, run_unittest
import re
from re import Scanner
import sys, os, traceback
from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefully modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
def test_weakref(self):
s = 'QabbbcR'
x = re.compile('ab+c')
y = proxy(x)
self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
def test_basic_re_sub(self):
self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
'9.3 -3 24x100y')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
'9.3 -3 23x99y')
self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
'\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_bug_1140(self):
# re.sub(x, y, u'') should return u'', not '', and
# re.sub(x, y, '') should return '', not u''.
# Also:
# re.sub(x, y, unicode(x)) should return unicode(y), and
# re.sub(x, y, str(x)) should return
# str(y) if isinstance(y, str) else unicode(y).
for x in 'x', u'x':
for y in 'y', u'y':
z = re.sub(x, y, u'')
self.assertEqual(z, u'')
self.assertEqual(type(z), unicode)
#
z = re.sub(x, y, '')
self.assertEqual(z, '')
self.assertEqual(type(z), str)
#
z = re.sub(x, y, unicode(x))
self.assertEqual(z, y)
self.assertEqual(type(z), unicode)
#
z = re.sub(x, y, str(x))
self.assertEqual(z, y)
self.assertEqual(type(z), type(y))
def test_bug_1661(self):
# Verify that flags do not get silently ignored with compiled patterns
pattern = re.compile('.')
self.assertRaises(ValueError, re.match, pattern, 'A', re.I)
self.assertRaises(ValueError, re.search, pattern, 'A', re.I)
self.assertRaises(ValueError, re.findall, pattern, 'A', re.I)
self.assertRaises(ValueError, re.compile, pattern, re.I)
def test_bug_3629(self):
# A regex that triggered a bug in the sre-code validator
re.compile("(?P<quote>)(?(quote))")
def test_sub_template_numeric_escape(self):
# bug 776311 and friends
self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
# in python2.3 (etc), these loop endlessly in sre_parser.py
self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
'xz8')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
def test_bug_462270(self):
# Test for empty sub() behaviour, see SF bug #462270
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx')
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0))
self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
def test_re_split(self):
self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:*)", ":a:b::c"),
['', ':', 'a', ':', 'b', '::', 'c'])
self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:)*", ":a:b::c"),
['', ':', 'a', ':', 'b', ':', 'c'])
self.assertEqual(re.split("([b:]+)", ":a:b::c"),
['', ':', 'a', ':b::', 'c'])
self.assertEqual(re.split("(b)|(:+)", ":a:b::c"),
['', None, ':', 'a', None, ':', '', 'b', None, '',
None, '::', 'c'])
self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
['', 'a', '', '', 'c'])
def test_qualified_re_split(self):
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
self.assertEqual(re.split("(:)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
self.assertEqual(re.split("(:*)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
def test_re_findall(self):
self.assertEqual(re.findall(":+", "abc"), [])
self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""),
(":", ":"),
(":", "::")])
def test_bug_117612(self):
self.assertEqual(re.findall(r"(a|(b))", "aba"),
[("a", ""),("b", "b"),("a", "")])
def test_re_match(self):
self.assertEqual(re.match('a', 'a').groups(), ())
self.assertEqual(re.match('(a)', 'a').groups(), ('a',))
self.assertEqual(re.match(r'(a)', 'a').group(0), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
pat = re.compile('((a)|(b))(c)?')
self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None))
self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None))
self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c'))
self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c'))
self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c'))
# A single group
m = re.match('(a)', 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(1), 'a')
self.assertEqual(m.group(1, 1), ('a', 'a'))
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'),
(None, 'b', None))
self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
def test_re_groupref_exists(self):
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
('(', 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None)
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None)
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
('a', 'b'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
('a', ''))
# Tests for bug #1177831: exercise groups other than the first group
p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
self.assertEqual(p.match('abc').groups(),
('a', 'b', 'c'))
self.assertEqual(p.match('ad').groups(),
('a', None, 'd'))
self.assertEqual(p.match('abd'), None)
self.assertEqual(p.match('ac'), None)
def test_re_groupref(self):
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(),
('|', 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None)
self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(),
('a', 'a'))
self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(),
(None, None))
def test_groupdict(self):
self.assertEqual(re.match('(?P<first>first) (?P<second>second)',
'first second').groupdict(),
{'first':'first', 'second':'second'})
def test_expand(self):
self.assertEqual(re.match("(?P<first>first) (?P<second>second)",
"first second")
.expand(r"\2 \1 \g<second> \g<first>"),
"second first second first")
def test_repeat_minmax(self):
self.assertEqual(re.match("^(\w){1}$", "abc"), None)
self.assertEqual(re.match("^(\w){1}?$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None)
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^x{1}$", "xxx"), None)
self.assertEqual(re.match("^x{1}?$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertEqual(re.match("^x{}$", "xxx"), None)
self.assertNotEqual(re.match("^x{}$", "x{}"), None)
def test_getattr(self):
self.assertEqual(re.match("(a)", "a").pos, 0)
self.assertEqual(re.match("(a)", "a").endpos, 1)
self.assertEqual(re.match("(a)", "a").string, "a")
self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1)))
self.assertNotEqual(re.match("(a)", "a").re, None)
def test_special_escapes(self):
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\b(b.)\b",
u"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
u"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"^abc$", u"\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", u"abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", u"\nabc\n", re.M), None)
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a").group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.LOCALE).group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
def test_bigcharset(self):
self.assertEqual(re.match(u"([\u2222\u2223])",
u"\u2222").group(1), u"\u2222")
self.assertEqual(re.match(u"([\u2222\u2223])",
u"\u2222", re.UNICODE).group(1), u"\u2222")
def test_anyall(self):
self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
"a\nb")
self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0),
"a\n\nb")
def test_non_consuming(self):
self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a")
def test_ignore_case(self):
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", u"ABC", re.I).group(0), "ABC")
self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa")
self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
def test_category(self):
self.assertEqual(re.match(r"(\s)", " ").group(1), " ")
def test_getlower(self):
import _sre
self.assertEqual(_sre.getlower(ord('A'), 0), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", u"ABC", re.I).group(0), "ABC")
def test_not_literal(self):
self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
def test_search_coverage(self):
self.assertEqual(re.search("\s(b)", " b").group(1), "b")
self.assertEqual(re.search("a\s", "a ").group(0), "a ")
def test_re_escape(self):
p=""
for i in range(0, 256):
p = p + chr(i)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)) is not None,
True)
self.assertEqual(re.match(re.escape(chr(i)), chr(i)).span(), (0,1))
pat=re.compile(re.escape(p))
self.assertEqual(pat.match(p) is not None, True)
self.assertEqual(pat.match(p).span(), (0,256))
def test_pickling(self):
import pickle
self.pickle_test(pickle)
import cPickle
self.pickle_test(cPickle)
# old pickles expect the _compile() reconstructor in sre module
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "The sre module is deprecated",
DeprecationWarning)
from sre import _compile
def pickle_test(self, pickle):
oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(oldpat)
newpat = pickle.loads(s)
self.assertEqual(oldpat, newpat)
def test_constants(self):
self.assertEqual(re.I, re.IGNORECASE)
self.assertEqual(re.L, re.LOCALE)
self.assertEqual(re.M, re.MULTILINE)
self.assertEqual(re.S, re.DOTALL)
self.assertEqual(re.X, re.VERBOSE)
def test_flags(self):
for flag in [re.I, re.M, re.X, re.S, re.L]:
self.assertNotEqual(re.compile('^pattern$', flag), None)
def test_sre_character_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"\%03o" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\%03o0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\%03o8" % i, chr(i)+"8"), None)
self.assertNotEqual(re.match(r"\x%02x" % i, chr(i)), None)
self.assertNotEqual(re.match(r"\x%02x0" % i, chr(i)+"0"), None)
self.assertNotEqual(re.match(r"\x%02xz" % i, chr(i)+"z"), None)
self.assertRaises(re.error, re.match, "\911", "")
def test_sre_character_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertNotEqual(re.match(r"[\%03o]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\%03o8]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02x0]" % i, chr(i)), None)
self.assertNotEqual(re.match(r"[\x%02xz]" % i, chr(i)), None)
self.assertRaises(re.error, re.match, "[\911]", "")
def test_bug_113254(self):
self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1))
def test_bug_527371(self):
# bug described in patches 527371/672491
self.assertEqual(re.match(r'(a)?a','a').lastindex, None)
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
self.assertEqual(re.match("((a))", "a").lastindex, 1)
def test_bug_545855(self):
# bug 545855 -- This pattern failed to cause a compile error as it
# should, instead provoking a TypeError.
self.assertRaises(re.error, re.compile, 'foo[a-')
def test_bug_418626(self):
# bugs 418626 et al. -- Testing Greg Chapman's addition of op code
# SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
# pattern '*?' on a long string.
self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001)
self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0),
20003)
self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001)
# non-simple '*?' still used to hit the recursion limit, before the
# non-recursive scheme was implemented.
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
def test_bug_612074(self):
pat=u"["+re.escape(u"\u2039")+u"]"
self.assertEqual(re.compile(pat) and 1, 1)
def test_stack_overflow(self):
# nasty cases that used to overflow the straightforward recursive
# implementation of repeated groups.
self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x')
self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
def test_scanner(self):
def s_ident(scanner, token): return token
def s_operator(scanner, token): return "op%s" % token
def s_float(scanner, token): return float(token)
def s_int(scanner, token): return int(token)
scanner = Scanner([
(r"[a-zA-Z_]\w*", s_ident),
(r"\d+\.\d*", s_float),
(r"\d+", s_int),
(r"=|\+|-|\*|/", s_operator),
(r"\s+", None),
])
self.assertNotEqual(scanner.scanner.scanner("").pattern, None)
self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"),
(['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5,
'op+', 'bar'], ''))
def test_bug_448951(self):
# bug 448951 (similar to 429357, but with single char match)
# (Also test greedy matches.)
for op in '','?','*':
self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(),
(None, None))
self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(),
('a:', 'a'))
def test_bug_725106(self):
# capturing groups in alternatives in repeats
self.assertEqual(re.match('^((a)|b)*', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(),
('b', None))
def test_bug_725149(self):
# mark_stack_base restoring before restoring marks
self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(),
('a', None))
self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(),
('a', None, None))
def test_bug_764548(self):
# bug 764548, re.compile() barfs on str/unicode subclasses
try:
unicode
except NameError:
return # no problem if we have no unicode
class my_unicode(unicode): pass
pat = re.compile(my_unicode("abc"))
self.assertEqual(pat.match("xyz"), None)
def test_finditer(self):
iter = re.finditer(r":+", "a:b::c:::d")
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
def test_bug_926075(self):
try:
unicode
except NameError:
return # no problem if we have no unicode
self.assert_(re.compile('bug_926075') is not
re.compile(eval("u'bug_926075'")))
def test_bug_931848(self):
try:
unicode
except NameError:
pass
pattern = eval('u"[\u002E\u3002\uFF0E\uFF61]"')
self.assertEqual(re.compile(pattern).split("a.b.c"),
['a','b','c'])
def test_bug_581080(self):
iter = re.finditer(r"\s", "a b")
self.assertEqual(iter.next().span(), (1,2))
self.assertRaises(StopIteration, iter.next)
scanner = re.compile(r"\s").scanner("a b")
self.assertEqual(scanner.search().span(), (1, 2))
self.assertEqual(scanner.search(), None)
def test_bug_817234(self):
iter = re.finditer(r".*", "asdf")
self.assertEqual(iter.next().span(), (0, 4))
self.assertEqual(iter.next().span(), (4, 4))
self.assertRaises(StopIteration, iter.next)
def test_empty_array(self):
# SF bug 1647541
import array
for typecode in 'cbBuhHiIlLfd':
a = array.array(typecode)
self.assertEqual(re.compile("bla").match(a), None)
self.assertEqual(re.compile("").match(a).groups(), ())
def test_inline_flags(self):
# Bug #1700
upper_char = unichr(0x1ea0) # Latin Capital Letter A with Dot Below
lower_char = unichr(0x1ea1) # Latin Small Letter A with Dot Below
p = re.compile(upper_char, re.I | re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile(lower_char, re.I | re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + upper_char, re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + lower_char, re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + upper_char)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + lower_char)
q = p.match(upper_char)
self.assertNotEqual(q, None)
def test_dollar_matches_twice(self):
"$ matches the end of string, and just before the terminating \n"
pattern = re.compile('$')
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
pattern = re.compile('$', re.MULTILINE)
self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' )
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
def test_dealloc(self):
# issue 3299: check for segfault in debug build
import _sre
# the overflow limit is different on wide and narrow builds and it
# depends on the definition of SRE_CODE (see sre.h).
# 2**128 should be big enough to overflow on both. For smaller values
# a RuntimeError is raised instead of OverflowError.
long_overflow = 2**128
self.assertRaises(TypeError, re.finditer, "a", {})
self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow])
def run_re_tests():
from test.re_tests import benchmarks, tests, SUCCEED, FAIL, SYNTAX_ERROR
if verbose:
print 'Running re_tests test suite'
else:
# To save time, only run the first and last 10 tests
#tests = tests[:10] + tests[-10:]
pass
for t in tests:
sys.stdout.flush()
pattern = s = outcome = repl = expected = None
if len(t) == 5:
pattern, s, outcome, repl, expected = t
elif len(t) == 3:
pattern, s, outcome = t
else:
raise ValueError, ('Test tuples should have 3 or 5 fields', t)
try:
obj = re.compile(pattern)
except re.error:
if outcome == SYNTAX_ERROR: pass # Expected a syntax error
else:
print '=== Syntax error:', t
except KeyboardInterrupt: raise KeyboardInterrupt
except:
print '*** Unexpected error ***', t
if verbose:
traceback.print_exc(file=sys.stdout)
else:
try:
result = obj.search(s)
except re.error, msg:
print '=== Unexpected exception', t, repr(msg)
if outcome == SYNTAX_ERROR:
# This should have been a syntax error; forget it.
pass
elif outcome == FAIL:
if result is None: pass # No match, as expected
else: print '=== Succeeded incorrectly', t
elif outcome == SUCCEED:
if result is not None:
# Matched, as expected, so now we compute the
# result string and compare it to our expected result.
start, end = result.span(0)
vardict={'found': result.group(0),
'groups': result.group(),
'flags': result.re.flags}
for i in range(1, 100):
try:
gi = result.group(i)
# Special hack because else the string concat fails:
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict['g%d' % i] = gi
for i in result.re.groupindex.keys():
try:
gi = result.group(i)
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict[i] = gi
repl = eval(repl, vardict)
if repl != expected:
print '=== grouping error', t,
print repr(repl) + ' should be ' + repr(expected)
else:
print '=== Failed incorrectly', t
# Try the match on a unicode string, and check that it
# still succeeds.
try:
result = obj.search(unicode(s, "latin-1"))
if result is None:
print '=== Fails on unicode match', t
except NameError:
continue # 1.5.2
except TypeError:
continue # unicode test case
# Try the match on a unicode pattern, and check that it
# still succeeds.
obj=re.compile(unicode(pattern, "latin-1"))
result = obj.search(s)
if result is None:
print '=== Fails on unicode pattern match', t
# Try the match with the search area limited to the extent
# of the match and see if it still succeeds. \B will
# break (because it won't match at the end or start of a
# string), so we'll ignore patterns that feature it.
if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
and result is not None:
obj = re.compile(pattern)
result = obj.search(s, result.start(0), result.end(0) + 1)
if result is None:
print '=== Failed on range-limited match', t
# Try the match with IGNORECASE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.IGNORECASE)
result = obj.search(s)
if result is None:
print '=== Fails on case-insensitive match', t
# Try the match with LOCALE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.LOCALE)
result = obj.search(s)
if result is None:
print '=== Fails on locale-sensitive match', t
# Try the match with UNICODE locale enabled, and check
# that it still succeeds.
obj = re.compile(pattern, re.UNICODE)
result = obj.search(s)
if result is None:
print '=== Fails on unicode-sensitive match', t
def test_main():
run_unittest(ReTests)
run_re_tests()
if __name__ == "__main__":
test_main()
|
mancoast/CPythonPyc_test
|
crash/265_test_re.py
|
Python
|
gpl-3.0
| 37,806
|
#!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2020-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Show information about the OpenGL setup."""
from PyQt5.QtGui import (QOpenGLContext, QOpenGLVersionProfile,
QOffscreenSurface, QGuiApplication)
app = QGuiApplication([])
surface = QOffscreenSurface()
surface.create()
ctx = QOpenGLContext()
ok = ctx.create()
assert ok
ok = ctx.makeCurrent(surface)
assert ok
print(f"GLES: {ctx.isOpenGLES()}")
vp = QOpenGLVersionProfile()
vp.setVersion(2, 0)
vf = ctx.versionFunctions(vp)
print(f"Vendor: {vf.glGetString(vf.GL_VENDOR)}")
print(f"Renderer: {vf.glGetString(vf.GL_RENDERER)}")
print(f"Version: {vf.glGetString(vf.GL_VERSION)}")
print(f"Shading language version: {vf.glGetString(vf.GL_SHADING_LANGUAGE_VERSION)}")
ctx.doneCurrent()
|
qutebrowser/qutebrowser
|
scripts/opengl_info.py
|
Python
|
gpl-3.0
| 1,537
|
#!/usr/bin/env python
from __future__ import print_function
import os
import cgi
from subprocess import Popen, PIPE, STDOUT
# Java
SCRIPTDIR = 'javaprolog'
# SCRIPT = ['/usr/bin/java', '-cp', 'json-simple-1.1.1.jar:gnuprologjava-0.2.6.jar:.', 'Shrdlite']
import platform
if platform.system()=='Windows':
SCRIPT = ['java', '-cp', 'json-simple-1.1.1.jar;gnuprologjava-0.2.6.jar;.', 'Shrdlite']
else:
SCRIPT = ['java', '-cp', 'json-simple-1.1.1.jar:gnuprologjava-0.2.6.jar:.', 'Shrdlite']
# # SWI Prolog
# SCRIPTDIR = 'javaprolog'
# SCRIPT = ['/usr/local/bin/swipl', '-q', '-g', 'main,halt', '-t', 'halt(1)', '-s', 'shrdlite.pl']
# # Haskell
# SCRIPTDIR = 'haskell'
# SCRIPT = ['/usr/bin/runhaskell', 'Shrdlite.hs']
# Python
# SCRIPTDIR = 'python'
# SCRIPT = ['/usr/bin/python', 'shrdlite.py']
while not os.path.isdir(SCRIPTDIR):
SCRIPTDIR = os.path.join("..", SCRIPTDIR)
print('Content-type:text/plain')
print()
try:
form = cgi.FieldStorage()
data = form.getfirst('data')
script = Popen(SCRIPT, cwd=SCRIPTDIR, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = script.communicate(data)
print(out)
if err:
raise Exception(err)
except:
import sys, traceback
print(traceback.format_exc())
sys.exit(1)
|
cthGoman/shrdlite
|
cgi-bin/ajaxwrapper.py
|
Python
|
gpl-3.0
| 1,269
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Title: logger
Author: David Leclerc
Version: 0.1
Date: 13.04.2018
License: GNU General Public License, Version 3
(http://www.gnu.org/licenses/gpl.html)
Overview: This is a script that generates a logging instance.
Notes: ...
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# LIBRARIES
import datetime
# USER LIBRARIES
import lib
import path
# CONSTANTS
LEVELS = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
DEFAULT_LEVEL = "INFO"
# CLASSES
class Logger(object):
def __init__(self, name, report = "loop.log", level = DEFAULT_LEVEL):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
INIT
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Store logger name
self.name = name
# Get level index
self.level = LEVELS.index(level)
# Define logging format
self.fmt = "[{:%H:%M:%S.%f}] [{:>16}] [{:>8}] --- {}"
# Define report
self.report = report
def log(self, level, msg, show = True):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LOG
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Does level allow logging?
if LEVELS.index(level) >= self.level:
# Get current time
now = datetime.datetime.now()
# Format message
msg = self.fmt.format(now, self.name, level, msg)
# Define log directory and touch it
directory = path.Path(path.REPORTS.path + lib.formatDate(now))
directory.touch()
# Log message
with open(directory.path + self.report, "a") as f:
f.write(msg + "\n")
# Print to terminal
if show:
print msg
def debug(self, msg):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
DEBUG
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Log message
self.log("DEBUG", msg)
def info(self, msg):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
INFO
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Log message
self.log("INFO", msg)
def warning(self, msg):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
WARNING
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Log message
self.log("WARNING", msg)
def error(self, msg):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ERROR
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Log message
self.log("ERROR", msg)
def critical(self, msg):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CRITICAL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Log message
self.log("CRITICAL", msg)
def main():
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MAIN
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Instantiate logger
logger = Logger(__name__)
# Write
logger.info("Test")
# Run this when script is called from terminal
if __name__ == "__main__":
main()
|
mm22dl/MeinKPS
|
logger.py
|
Python
|
gpl-3.0
| 3,955
|
#!/usr/bin/env python
import re, logging, copy, json
from threading import Lock
class Tagger(object):
def __init__(self, hosts_attr="fields.hosts", hosts_sep=":", tag_file="tags_jobs.safe"):
self.tags_by_host = {}
self.hosts_sep = str(hosts_sep)
self.hosts_attr = str(hosts_attr)
self.tag_file = tag_file
self.lock = Lock()
def add(self, measurement):
hosts = measurement.get_attr(self.hosts_attr)
if not hosts:
logging.error("Measurement does not have the attribute with host list %s" % self.hosts_attr)
return False
hostlist = re.split(self.hosts_sep, hosts.strip("'").strip("\""))
tags = measurement.get_all_tags()
if self.hosts_attr in tags:
del tags[self.hosts_attr]
self.lock.acquire()
for h in hostlist:
if h in self.tags_by_host:
logging.info("Host %s already registered for key %s. Overwrite exiting mapping" % (h, self.tags_by_host[h],))
logging.info("Add Host %s with tags %s" % (h, str(tags),))
self.tags_by_host[h] = tags
self.lock.release()
return True
def delete(self, measurement):
hosts = measurement.get_attr(self.hosts_attr)
if not hosts:
logging.error("Measurement does not have the attribute with host list %s" % self.hosts_attr)
return False
hostlist = re.split(self.hosts_sep, hosts.strip("'").strip("\""))
self.lock.acquire()
for h in hostlist:
if h in self.tags_by_host:
logging.info("Delete Host %s with tags %s" % (h, str(self.tags_by_host[h]),))
del self.tags_by_host[h]
self.lock.release()
return True
def get_tags_by_host(self, host):
if host not in self.tags_by_host:
return {}
return copy.deepcopy(self.tags_by_host[host])
def get_all_tags(self):
return self.tags_by_host
def get_all_active_hosts(self):
return sorted(self.tags_by_host.keys())
def host_active(self, host):
return host in self.tags_by_host
def store(self):
f = open(self.tag_file, "w")
f.write(json.dumps(self.tags_by_host, sort_keys=True, indent=4, separators=(',', ': ')))
f.close()
def restore(self):
f = open(self.tag_file, "r")
self.lock.acquire()
self.tags_by_host = json.loads(f.read())
self.lock.release()
f.close()
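# --- Hypothetical usage sketch (illustrative only, not part of the module) ---
# Assumes a minimal measurement object exposing the get_attr()/get_all_tags()
# interface that Tagger relies on above.
if __name__ == "__main__":
    class FakeMeasurement(object):
        def __init__(self, attrs, tags):
            self._attrs, self._tags = attrs, tags
        def get_attr(self, key):
            return self._attrs.get(key)
        def get_all_tags(self):
            return dict(self._tags)
    tagger = Tagger()
    m = FakeMeasurement({"fields.hosts": "host1:host2"},
                        {"jobid": "42", "fields.hosts": "host1:host2"})
    tagger.add(m)
    print(tagger.get_tags_by_host("host1"))  # {'jobid': '42'}
    tagger.delete(m)
    print(tagger.get_all_active_hosts())     # []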
|
RRZE-HPC/LMS
|
midware/influxdbrouter/influxdbrouter/tagstore.py
|
Python
|
gpl-3.0
| 2,579
|