| repo_name (string, length 5-100) | ref (string, length 12-67) | path (string, length 4-244) | copies (string, length 1-8) | content (string, length 0-1.05M, ⌀ = may be null) |
|---|---|---|---|---|
shuaiharry/QEMU
|
refs/heads/master
|
scripts/qapi-visit.py
|
13
|
#
# QAPI visitor generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Michael Roth <mdroth@linux.vnet.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_visit_struct_body(field_prefix, members):
ret = ""
if len(field_prefix):
field_prefix = field_prefix + "."
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
visit_start_optional(m, (obj && *obj) ? &(*obj)->%(c_prefix)shas_%(c_name)s : NULL, "%(name)s", errp);
if ((*obj)->%(prefix)shas_%(c_name)s) {
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
c_name=c_var(argname), name=argname)
push_indent()
if structured:
ret += mcgen('''
visit_start_struct(m, NULL, "", "%(name)s", 0, errp);
''',
name=argname)
ret += generate_visit_struct_body(field_prefix + argname, argentry)
ret += mcgen('''
visit_end_struct(m, errp);
''')
else:
ret += mcgen('''
visit_type_%(type)s(m, (obj && *obj) ? &(*obj)->%(c_prefix)s%(c_name)s : NULL, "%(name)s", errp);
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
type=type_name(argentry), c_name=c_var(argname),
name=argname)
if optional:
pop_indent()
ret += mcgen('''
}
visit_end_optional(m, errp);
''')
return ret
def generate_visit_struct(name, members):
ret = mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), errp);
''',
name=name)
push_indent()
ret += generate_visit_struct_body("", members)
pop_indent()
ret += mcgen('''
visit_end_struct(m, errp);
}
''')
return ret
def generate_visit_list(name, members):
return mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp)
{
GenericList *i;
visit_start_list(m, name, errp);
for (i = visit_next_list(m, (GenericList **)obj, errp); i; i = visit_next_list(m, &i, errp)) {
%(name)sList *native_i = (%(name)sList *)i;
visit_type_%(name)s(m, &native_i->value, NULL, errp);
}
visit_end_list(m, errp);
}
''',
name=name)
def generate_visit_enum(name, members):
return mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp)
{
visit_type_enum(m, (int *)obj, %(name)s_lookup, "%(name)s", name, errp);
}
''',
name=name)
def generate_visit_union(name, members):
ret = generate_visit_enum('%sKind' % name, members.keys())
ret += mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
}
''',
name=name)
return ret
def generate_declaration(name, members, genlist=True):
ret = mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp);
''',
name=name)
if genlist:
ret += mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp);
''',
name=name)
return ret
def generate_decl_enum(name, members, genlist=True):
return mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp);
''',
name=name)
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "p:o:", ["prefix=", "output-dir="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
c_file = 'qapi-visit.c'
h_file = 'qapi-visit.h'
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
fdef = open(c_file, 'w')
fdecl = open(h_file, 'w')
fdef.write(mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI visitor functions
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "%(header)s"
''',
header=basename(h_file)))
fdecl.write(mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI visitor function
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include "qapi/qapi-visit-core.h"
#include "%(prefix)sqapi-types.h"
''',
prefix=prefix, guard=guardname(h_file)))
exprs = parse_schema(sys.stdin)
for expr in exprs:
if expr.has_key('type'):
ret = generate_visit_struct(expr['type'], expr['data'])
ret += generate_visit_list(expr['type'], expr['data'])
fdef.write(ret)
ret = generate_declaration(expr['type'], expr['data'])
fdecl.write(ret)
elif expr.has_key('union'):
ret = generate_visit_union(expr['union'], expr['data'])
fdef.write(ret)
ret = generate_decl_enum('%sKind' % expr['union'], expr['data'].keys())
ret += generate_declaration(expr['union'], expr['data'])
fdecl.write(ret)
elif expr.has_key('enum'):
ret = generate_visit_enum(expr['enum'], expr['data'])
fdef.write(ret)
ret = generate_decl_enum(expr['enum'], expr['data'])
fdecl.write(ret)
fdecl.write('''
#endif
''')
fdecl.flush()
fdecl.close()
fdef.flush()
fdef.close()
|
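For orientation, here is a sketch of how the generator above is driven. The schema expression and the `python2`/path details are assumptions for illustration (the old single-quoted expression style is what `parse_schema` plus the `has_key('type')` branch expect); the declarations shown in the trailing comment follow directly from the `mcgen` templates in `generate_declaration`.

```python
# Minimal driving sketch for scripts/qapi-visit.py above (assumptions: a QEMU
# checkout of that era with the companion 'qapi' module importable, and a
# Python 2 interpreter named "python2"; the one-struct schema is illustrative).
import subprocess

schema = "{ 'type': 'VersionInfo', 'data': { 'qemu': 'str' } }\n"
proc = subprocess.Popen(
    ["python2", "scripts/qapi-visit.py", "--output-dir", "build", "--prefix", "example-"],
    stdin=subprocess.PIPE)
proc.communicate(schema.encode())

# Per the templates above, build/example-qapi-visit.h would then declare e.g.:
#   void visit_type_VersionInfo(Visitor *m, VersionInfo ** obj, const char *name, Error **errp);
#   void visit_type_VersionInfoList(Visitor *m, VersionInfoList ** obj, const char *name, Error **errp);
```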
followyourheart/SFrame
|
refs/heads/master
|
oss_src/unity/python/sframe/toolkits/image_analysis/__init__.py
|
9
|
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
__all__ = ['image_analysis']
import image_analysis
|
CVML/scikit-learn
|
refs/heads/master
|
examples/exercises/plot_iris_exercise.py
|
323
|
"""
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
|
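The exercise above only plots the decision surfaces; a self-contained sketch for comparing the three kernels numerically on the same held-out 10% split (assumption: scikit-learn and NumPy installed; `SVC.score` is the standard mean-accuracy metric):

```python
# Score each kernel on the held-out 10% of the shuffled iris subset, mirroring
# the split used in the exercise above.
import numpy as np
from sklearn import datasets, svm

iris = datasets.load_iris()
X, y = iris.data, iris.target
X, y = X[y != 0, :2], y[y != 0]          # classes 1 and 2, first two features

rng = np.random.RandomState(0)
order = rng.permutation(len(X))
X, y = X[order], y[order].astype(float)

split = int(0.9 * len(X))
X_train, y_train = X[:split], y[:split]
X_test, y_test = X[split:], y[split:]

for kernel in ('linear', 'rbf', 'poly'):
    clf = svm.SVC(kernel=kernel, gamma=10).fit(X_train, y_train)
    print(kernel, clf.score(X_test, y_test))   # mean accuracy on the test split
```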
rembo10/headphones
|
refs/heads/master
|
lib/unidecode/x074.py
|
252
|
data = (
'Han ', # 0x00
'Xuan ', # 0x01
'Yan ', # 0x02
'Qiu ', # 0x03
'Quan ', # 0x04
'Lang ', # 0x05
'Li ', # 0x06
'Xiu ', # 0x07
'Fu ', # 0x08
'Liu ', # 0x09
'Ye ', # 0x0a
'Xi ', # 0x0b
'Ling ', # 0x0c
'Li ', # 0x0d
'Jin ', # 0x0e
'Lian ', # 0x0f
'Suo ', # 0x10
'Chiisai ', # 0x11
'[?] ', # 0x12
'Wan ', # 0x13
'Dian ', # 0x14
'Pin ', # 0x15
'Zhan ', # 0x16
'Cui ', # 0x17
'Min ', # 0x18
'Yu ', # 0x19
'Ju ', # 0x1a
'Chen ', # 0x1b
'Lai ', # 0x1c
'Wen ', # 0x1d
'Sheng ', # 0x1e
'Wei ', # 0x1f
'Dian ', # 0x20
'Chu ', # 0x21
'Zhuo ', # 0x22
'Pei ', # 0x23
'Cheng ', # 0x24
'Hu ', # 0x25
'Qi ', # 0x26
'E ', # 0x27
'Kun ', # 0x28
'Chang ', # 0x29
'Qi ', # 0x2a
'Beng ', # 0x2b
'Wan ', # 0x2c
'Lu ', # 0x2d
'Cong ', # 0x2e
'Guan ', # 0x2f
'Yan ', # 0x30
'Diao ', # 0x31
'Bei ', # 0x32
'Lin ', # 0x33
'Qin ', # 0x34
'Pi ', # 0x35
'Pa ', # 0x36
'Que ', # 0x37
'Zhuo ', # 0x38
'Qin ', # 0x39
'Fa ', # 0x3a
'[?] ', # 0x3b
'Qiong ', # 0x3c
'Du ', # 0x3d
'Jie ', # 0x3e
'Hun ', # 0x3f
'Yu ', # 0x40
'Mao ', # 0x41
'Mei ', # 0x42
'Chun ', # 0x43
'Xuan ', # 0x44
'Ti ', # 0x45
'Xing ', # 0x46
'Dai ', # 0x47
'Rou ', # 0x48
'Min ', # 0x49
'Zhen ', # 0x4a
'Wei ', # 0x4b
'Ruan ', # 0x4c
'Huan ', # 0x4d
'Jie ', # 0x4e
'Chuan ', # 0x4f
'Jian ', # 0x50
'Zhuan ', # 0x51
'Yang ', # 0x52
'Lian ', # 0x53
'Quan ', # 0x54
'Xia ', # 0x55
'Duan ', # 0x56
'Yuan ', # 0x57
'Ye ', # 0x58
'Nao ', # 0x59
'Hu ', # 0x5a
'Ying ', # 0x5b
'Yu ', # 0x5c
'Huang ', # 0x5d
'Rui ', # 0x5e
'Se ', # 0x5f
'Liu ', # 0x60
'Shi ', # 0x61
'Rong ', # 0x62
'Suo ', # 0x63
'Yao ', # 0x64
'Wen ', # 0x65
'Wu ', # 0x66
'Jin ', # 0x67
'Jin ', # 0x68
'Ying ', # 0x69
'Ma ', # 0x6a
'Tao ', # 0x6b
'Liu ', # 0x6c
'Tang ', # 0x6d
'Li ', # 0x6e
'Lang ', # 0x6f
'Gui ', # 0x70
'Zhen ', # 0x71
'Qiang ', # 0x72
'Cuo ', # 0x73
'Jue ', # 0x74
'Zhao ', # 0x75
'Yao ', # 0x76
'Ai ', # 0x77
'Bin ', # 0x78
'Tu ', # 0x79
'Chang ', # 0x7a
'Kun ', # 0x7b
'Zhuan ', # 0x7c
'Cong ', # 0x7d
'Jin ', # 0x7e
'Yi ', # 0x7f
'Cui ', # 0x80
'Cong ', # 0x81
'Qi ', # 0x82
'Li ', # 0x83
'Ying ', # 0x84
'Suo ', # 0x85
'Qiu ', # 0x86
'Xuan ', # 0x87
'Ao ', # 0x88
'Lian ', # 0x89
'Man ', # 0x8a
'Zhang ', # 0x8b
'Yin ', # 0x8c
'[?] ', # 0x8d
'Ying ', # 0x8e
'Zhi ', # 0x8f
'Lu ', # 0x90
'Wu ', # 0x91
'Deng ', # 0x92
'Xiou ', # 0x93
'Zeng ', # 0x94
'Xun ', # 0x95
'Qu ', # 0x96
'Dang ', # 0x97
'Lin ', # 0x98
'Liao ', # 0x99
'Qiong ', # 0x9a
'Su ', # 0x9b
'Huang ', # 0x9c
'Gui ', # 0x9d
'Pu ', # 0x9e
'Jing ', # 0x9f
'Fan ', # 0xa0
'Jin ', # 0xa1
'Liu ', # 0xa2
'Ji ', # 0xa3
'[?] ', # 0xa4
'Jing ', # 0xa5
'Ai ', # 0xa6
'Bi ', # 0xa7
'Can ', # 0xa8
'Qu ', # 0xa9
'Zao ', # 0xaa
'Dang ', # 0xab
'Jiao ', # 0xac
'Gun ', # 0xad
'Tan ', # 0xae
'Hui ', # 0xaf
'Huan ', # 0xb0
'Se ', # 0xb1
'Sui ', # 0xb2
'Tian ', # 0xb3
'[?] ', # 0xb4
'Yu ', # 0xb5
'Jin ', # 0xb6
'Lu ', # 0xb7
'Bin ', # 0xb8
'Shou ', # 0xb9
'Wen ', # 0xba
'Zui ', # 0xbb
'Lan ', # 0xbc
'Xi ', # 0xbd
'Ji ', # 0xbe
'Xuan ', # 0xbf
'Ruan ', # 0xc0
'Huo ', # 0xc1
'Gai ', # 0xc2
'Lei ', # 0xc3
'Du ', # 0xc4
'Li ', # 0xc5
'Zhi ', # 0xc6
'Rou ', # 0xc7
'Li ', # 0xc8
'Zan ', # 0xc9
'Qiong ', # 0xca
'Zhe ', # 0xcb
'Gui ', # 0xcc
'Sui ', # 0xcd
'La ', # 0xce
'Long ', # 0xcf
'Lu ', # 0xd0
'Li ', # 0xd1
'Zan ', # 0xd2
'Lan ', # 0xd3
'Ying ', # 0xd4
'Mi ', # 0xd5
'Xiang ', # 0xd6
'Xi ', # 0xd7
'Guan ', # 0xd8
'Dao ', # 0xd9
'Zan ', # 0xda
'Huan ', # 0xdb
'Gua ', # 0xdc
'Bo ', # 0xdd
'Die ', # 0xde
'Bao ', # 0xdf
'Hu ', # 0xe0
'Zhi ', # 0xe1
'Piao ', # 0xe2
'Ban ', # 0xe3
'Rang ', # 0xe4
'Li ', # 0xe5
'Wa ', # 0xe6
'Dekaguramu ', # 0xe7
'Jiang ', # 0xe8
'Qian ', # 0xe9
'Fan ', # 0xea
'Pen ', # 0xeb
'Fang ', # 0xec
'Dan ', # 0xed
'Weng ', # 0xee
'Ou ', # 0xef
'Deshiguramu ', # 0xf0
'Miriguramu ', # 0xf1
'Thon ', # 0xf2
'Hu ', # 0xf3
'Ling ', # 0xf4
'Yi ', # 0xf5
'Ping ', # 0xf6
'Ci ', # 0xf7
'Hekutogura ', # 0xf8
'Juan ', # 0xf9
'Chang ', # 0xfa
'Chi ', # 0xfb
'Sarake ', # 0xfc
'Dang ', # 0xfd
'Meng ', # 0xfe
'Pou ', # 0xff
)
|
Jenselme/AutobahnPython
|
refs/heads/master
|
examples/twisted/websocket/wrapping/server.py
|
2
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from twisted.internet.protocol import Protocol
class HelloServerProtocol(Protocol):
def connectionMade(self):
print("connectionMade", self.transport.getHost(), self.transport.getPeer())
self.transport.write('how are you?' * 100)
def dataReceived(self, data):
print("dataReceived: {}".format(data))
if __name__ == '__main__':
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.internet.protocol import Factory
from autobahn.twisted.websocket import WrappingWebSocketServerFactory
log.startLogging(sys.stdout)
wrappedFactory = Factory.forProtocol(HelloServerProtocol)
factory = WrappingWebSocketServerFactory(wrappedFactory,
u"ws://127.0.0.1:9000",
enableCompression=False,
autoFragmentSize=1024)
reactor.listenTCP(9000, factory)
reactor.run()
|
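The script above wraps a plain Twisted stream protocol on the server side; for completeness, a sketch of a matching client (assumptions: `WrappingWebSocketClientFactory` from the same `autobahn.twisted.websocket` module mirrors the server-side constructor used above, and the hello protocol below is made up for illustration):

```python
# Wrapped-client sketch for the wrapping server above (assumptions in lead-in).
from twisted.internet.protocol import Protocol, Factory

class HelloClientProtocol(Protocol):
    def connectionMade(self):
        self.transport.write(b'hello from the wrapped client')

    def dataReceived(self, data):
        print("dataReceived: {}".format(data))

if __name__ == '__main__':
    import sys
    from twisted.python import log
    from twisted.internet import reactor
    from autobahn.twisted.websocket import WrappingWebSocketClientFactory

    log.startLogging(sys.stdout)

    wrappedFactory = Factory.forProtocol(HelloClientProtocol)
    factory = WrappingWebSocketClientFactory(wrappedFactory,
                                             u"ws://127.0.0.1:9000",
                                             enableCompression=False,
                                             autoFragmentSize=1024)
    reactor.connectTCP("127.0.0.1", 9000, factory)
    reactor.run()
```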
deepak7mahto/Web_Scrapper_3010
|
refs/heads/master
|
Web_Scrapper_Generic/Web_Scrapper_Generic/examples/e2.py
|
1
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from PyQt4 import QtCore, QtGui
class MyDialog(QtGui.QDialog):
def __init__(self, parent=None):
super(MyDialog, self).__init__(parent)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.textBrowser = QtGui.QTextBrowser(self)
self.textBrowser.append("This is a QTextBrowser!")
self.verticalLayout = QtGui.QVBoxLayout(self)
self.verticalLayout.addWidget(self.textBrowser)
self.verticalLayout.addWidget(self.buttonBox)
class MyWindow(QtGui.QWidget):
def __init__(self, parent=None):
super(MyWindow, self).__init__(parent)
self.pushButtonWindow = QtGui.QPushButton(self)
self.pushButtonWindow.setText("Click Me!")
self.pushButtonWindow.clicked.connect(self.on_pushButton_clicked)
self.layout = QtGui.QHBoxLayout(self)
self.layout.addWidget(self.pushButtonWindow)
self.dialogTextBrowser = MyDialog(self)
@QtCore.pyqtSlot()
def on_pushButton_clicked(self):
self.dialogTextBrowser.exec_()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
app.setApplicationName('MyWindow')
main = MyWindow()
main.show()
sys.exit(app.exec_())
|
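The PyQt4 example above builds a dialog whose OK/Cancel buttons are not yet wired up; a small sketch of the same pattern with the `QDialogButtonBox` signals connected to `QDialog.accept()`/`reject()`, so `exec_()` reports which button closed the dialog (the dialog class and label text are made up for illustration):

```python
# Sketch: a dialog like MyDialog above, but with the button box connected so
# exec_() returns QDialog.Accepted or QDialog.Rejected.
import sys
from PyQt4 import QtGui

class ConfirmDialog(QtGui.QDialog):
    def __init__(self, parent=None):
        super(ConfirmDialog, self).__init__(parent)
        buttons = QtGui.QDialogButtonBox(
            QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel, self)
        buttons.accepted.connect(self.accept)    # OK     -> QDialog.Accepted
        buttons.rejected.connect(self.reject)    # Cancel -> QDialog.Rejected
        layout = QtGui.QVBoxLayout(self)
        layout.addWidget(QtGui.QLabel("Proceed?", self))
        layout.addWidget(buttons)

if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    result = ConfirmDialog().exec_()
    print("Accepted" if result == QtGui.QDialog.Accepted else "Rejected")
```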
ygenc/onlineLDA
|
refs/heads/master
|
onlineldavb_new/build/scipy/scipy/sparse/linalg/isolve/tests/demo_lgmres.py
|
10
|
import scipy.sparse.linalg as la
import scipy.sparse as sp
import scipy.io as io
import numpy as np
import sys
#problem = "SPARSKIT/drivcav/e05r0100"
problem = "SPARSKIT/drivcav/e05r0200"
#problem = "Harwell-Boeing/sherman/sherman1"
#problem = "misc/hamm/add32"
mm = np.lib._datasource.Repository('ftp://math.nist.gov/pub/MatrixMarket2/')
f = mm.open('%s.mtx.gz' % problem)
Am = io.mmread(f).tocsr()
f.close()
f = mm.open('%s_rhs1.mtx.gz' % problem)
b = np.array(io.mmread(f)).ravel()
f.close()
count = [0]
def matvec(v):
count[0] += 1
sys.stderr.write('%d\r' % count[0])
return Am*v
A = la.LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
M = 100
print "MatrixMarket problem %s" % problem
print "Invert %d x %d matrix; nnz = %d" % (Am.shape[0], Am.shape[1], Am.nnz)
count[0] = 0
x0, info = la.gmres(A, b, restrt=M, tol=1e-14)
count_0 = count[0]
err0 = np.linalg.norm(Am*x0 - b) / np.linalg.norm(b)
print "GMRES(%d):" % M, count_0, "matvecs, residual", err0
if info != 0:
print "Didn't converge"
count[0] = 0
x1, info = la.lgmres(A, b, inner_m=M-6*2, outer_k=6, tol=1e-14)
count_1 = count[0]
err1 = np.linalg.norm(Am*x1 - b) / np.linalg.norm(b)
print "LGMRES(%d,6) [same memory req.]:" % (M-2*6), count_1, \
"matvecs, residual:", err1
if info != 0:
print "Didn't converge"
count[0] = 0
x2, info = la.lgmres(A, b, inner_m=M-6, outer_k=6, tol=1e-14)
count_2 = count[0]
err2 = np.linalg.norm(Am*x2 - b) / np.linalg.norm(b)
print "LGMRES(%d,6) [same subspace size]:" % (M-6), count_2, \
"matvecs, residual:", err2
if info != 0:
print "Didn't converge"
|
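The demo above needs a MatrixMarket download and a Python 2 interpreter; here is a self-contained sketch of the same `LinearOperator` + `lgmres` call pattern on a synthetic sparse system, so the matvec-counting trick can be tried offline (assumption: any reasonably recent NumPy/SciPy):

```python
# Count matrix-vector products used by lgmres on a synthetic, well-conditioned
# sparse system (stand-in for the MatrixMarket problem in the demo above).
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as la

n = 200
A = sp.identity(n, format='csr') * 2.0 + sp.random(n, n, density=0.01,
                                                   random_state=0, format='csr')
b = np.ones(n)

count = [0]
def matvec(v):
    count[0] += 1                      # same counting trick as the demo
    return A @ v

op = la.LinearOperator(matvec=matvec, shape=A.shape, dtype=A.dtype)
x, info = la.lgmres(op, b, inner_m=30, outer_k=3)
residual = np.linalg.norm(A @ x - b) / np.linalg.norm(b)
print(info, count[0], residual)        # info == 0 means it converged
```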
jarvys/django-1.7-jdb
|
refs/heads/master
|
tests/get_earliest_or_latest/tests.py
|
41
|
from __future__ import unicode_literals
from datetime import datetime
from django.test import TestCase
from .models import Article, Person
class EarliestOrLatestTests(TestCase):
"""Tests for the earliest() and latest() objects methods"""
def tearDown(self):
"""Makes sure Article has a get_latest_by"""
if not Article._meta.get_latest_by:
Article._meta.get_latest_by = 'pub_date'
def test_earliest(self):
# Because no Articles exist yet, earliest() raises ArticleDoesNotExist.
self.assertRaises(Article.DoesNotExist, Article.objects.earliest)
a1 = Article.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1)
)
a2 = Article.objects.create(
headline="Article 2", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 7, 28)
)
Article.objects.create(
headline="Article 3", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 8, 27)
)
Article.objects.create(
headline="Article 4", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 7, 30)
)
# Get the earliest Article.
self.assertEqual(Article.objects.earliest(), a1)
# Get the earliest Article that matches certain filters.
self.assertEqual(
Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).earliest(),
a2
)
# Pass a custom field name to earliest() to change the field that's used
# to determine the earliest object.
self.assertEqual(Article.objects.earliest('expire_date'), a2)
self.assertEqual(Article.objects.filter(
pub_date__gt=datetime(2005, 7, 26)).earliest('expire_date'), a2)
# Ensure that earliest() overrides any other ordering specified on the
# query. Refs #11283.
self.assertEqual(Article.objects.order_by('id').earliest(), a1)
# Ensure that error is raised if the user forgot to add a get_latest_by
# in the Model.Meta
Article.objects.model._meta.get_latest_by = None
self.assertRaisesMessage(
AssertionError,
"earliest() and latest() require either a field_name parameter or "
"'get_latest_by' in the model",
lambda: Article.objects.earliest(),
)
def test_latest(self):
# Because no Articles exist yet, latest() raises ArticleDoesNotExist.
self.assertRaises(Article.DoesNotExist, Article.objects.latest)
a1 = Article.objects.create(
headline="Article 1", pub_date=datetime(2005, 7, 26),
expire_date=datetime(2005, 9, 1)
)
Article.objects.create(
headline="Article 2", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 7, 28)
)
a3 = Article.objects.create(
headline="Article 3", pub_date=datetime(2005, 7, 27),
expire_date=datetime(2005, 8, 27)
)
a4 = Article.objects.create(
headline="Article 4", pub_date=datetime(2005, 7, 28),
expire_date=datetime(2005, 7, 30)
)
# Get the latest Article.
self.assertEqual(Article.objects.latest(), a4)
# Get the latest Article that matches certain filters.
self.assertEqual(
Article.objects.filter(pub_date__lt=datetime(2005, 7, 27)).latest(),
a1
)
# Pass a custom field name to latest() to change the field that's used
# to determine the latest object.
self.assertEqual(Article.objects.latest('expire_date'), a1)
self.assertEqual(
Article.objects.filter(pub_date__gt=datetime(2005, 7, 26)).latest('expire_date'),
a3,
)
# Ensure that latest() overrides any other ordering specified on the query. Refs #11283.
self.assertEqual(Article.objects.order_by('id').latest(), a4)
# Ensure that error is raised if the user forgot to add a get_latest_by
# in the Model.Meta
Article.objects.model._meta.get_latest_by = None
self.assertRaisesMessage(
AssertionError,
"earliest() and latest() require either a field_name parameter or "
"'get_latest_by' in the model",
lambda: Article.objects.latest(),
)
def test_latest_manual(self):
# You can still use latest() with a model that doesn't have
# "get_latest_by" set -- just pass in the field name manually.
Person.objects.create(name="Ralph", birthday=datetime(1950, 1, 1))
p2 = Person.objects.create(name="Stephanie", birthday=datetime(1960, 2, 3))
self.assertRaises(AssertionError, Person.objects.latest)
self.assertEqual(Person.objects.latest("birthday"), p2)
def test_first(self):
p1 = Person.objects.create(name="Bob", birthday=datetime(1950, 1, 1))
p2 = Person.objects.create(name="Alice", birthday=datetime(1961, 2, 3))
self.assertEqual(
Person.objects.first(), p1)
self.assertEqual(
Person.objects.order_by('name').first(), p2)
self.assertEqual(
Person.objects.filter(birthday__lte=datetime(1955, 1, 1)).first(),
p1)
self.assertIs(
Person.objects.filter(birthday__lte=datetime(1940, 1, 1)).first(),
None)
def test_last(self):
p1 = Person.objects.create(
name="Alice", birthday=datetime(1950, 1, 1))
p2 = Person.objects.create(
name="Bob", birthday=datetime(1960, 2, 3))
# Note: by default PK ordering.
self.assertEqual(
Person.objects.last(), p2)
self.assertEqual(
Person.objects.order_by('-name').last(), p1)
self.assertEqual(
Person.objects.filter(birthday__lte=datetime(1955, 1, 1)).last(),
p1)
self.assertIs(
Person.objects.filter(birthday__lte=datetime(1940, 1, 1)).last(),
None)
|
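The tests above lean on `Meta.get_latest_by` (and on `Person` deliberately not defining it); a hypothetical reconstruction of the `.models` module they import, for readers following along (field names are taken from the test bodies, but the real `tests/get_earliest_or_latest/models.py` may differ):

```python
# Hypothetical sketch of the models used by the tests above.
from django.db import models

class Article(models.Model):
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()
    expire_date = models.DateTimeField()

    class Meta:
        get_latest_by = 'pub_date'   # default field for earliest()/latest()

class Person(models.Model):
    name = models.CharField(max_length=30)
    birthday = models.DateTimeField()
    # No get_latest_by here: latest()/earliest() must be given a field name,
    # which is exactly what test_latest_manual exercises.
```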
manfer/LFP.bundle
|
refs/heads/master
|
Contents/Code/__init__.py
|
1
|
# -*- coding: utf-8 -*-
TITLE = u'LFP'
PREFIX = '/video/lfp'
LFP_BASE_URL = 'http://www.laliga.es'
LFP_MULTIMEDIA = '%s/multimedia' % LFP_BASE_URL
LFP_ICON = 'lfp.png'
ICON = 'default-icon.png'
LFP_HL_ICON = 'highlights.png'
LFP_VIDEO_ICON = 'video.png'
LFP_PHOTO_ICON = 'photo.png'
LFP_LALIGATV_ICON = 'laligatv.png'
SEARCH_ICON = 'search-icon.png'
SETTINGS_ICON = 'settings-icon.png'
ART = 'futbol.jpg'
HTTP_HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Connection': 'keep-alive',
'Origin': LFP_BASE_URL,
'Referer': LFP_MULTIMEDIA
}
from lfputil import L
from lfpvideo import *
from lfpfoto import *
from laligatv import *
from lfpsearch import *
################################################################################
def Start():
Plugin.AddViewGroup('List', viewMode='List', mediaType='items')
Plugin.AddViewGroup('InfoList', viewMode='InfoList', mediaType='items')
Plugin.AddViewGroup('PanelStream', viewMode='PanelStream', mediaType='items')
ObjectContainer.title1 = TITLE
#ObjectContainer.view_group = 'List'
ObjectContainer.art = R(ART)
DirectoryObject.thumb = R(ICON)
DirectoryObject.art = R(ART)
PhotoAlbumObject.thumb = R(ICON)
HTTP.CacheTime = CACHE_1HOUR
################################################################################
@handler(PREFIX, TITLE, art=ART, thumb=LFP_ICON)
def lfp_main_menu():
oc = ObjectContainer()
oc.add(DirectoryObject(
key = Callback(lfp_resumenes),
title = L("Highlights"),
summary = L("enjoy lfp highlight videos"),
thumb = R(LFP_HL_ICON)
))
oc.add(DirectoryObject(
key = Callback(lfp_videos),
title = L("Other Videos"),
summary = L("enjoy other videos on lfp website"),
thumb = R(LFP_VIDEO_ICON)
))
oc.add(DirectoryObject(
key = Callback(lfp_fotos),
title = L("Photos"),
summary = L("enjoy the photos on lfp website"),
thumb = R(LFP_PHOTO_ICON)
))
oc.add(DirectoryObject(
key = Callback(lfp_laligatv),
title = L("La Liga TV"),
summary = L("enjoy live Adelante League matches"),
thumb = R(LFP_LALIGATV_ICON)
))
if Client.Product != 'PlexConnect':
oc.add(InputDirectoryObject(
key = Callback(lfp_search),
title = L('Search LFP Videos'),
prompt = L('Search for LFP Videos'),
summary = L('Search for LFP Videos'),
thumb = R(SEARCH_ICON)
))
return oc
|
douglas-larocca/turses
|
refs/heads/master
|
tests/test_core.py
|
2
|
# -*- coding: utf-8 -*-
from sys import path
path.append('../')
import unittest
from mock import Mock
from . import create_status
from turses.models import TimelineList
from turses.api.helpers import (
is_home_timeline,
is_user_timeline,
is_own_timeline,
is_mentions_timeline,
is_messages_timeline,
is_thread_timeline,
)
from turses.config import configuration
from turses.core import InputHandler, Controller
from turses.api.debug import MockApi
class InputHandlerTest(unittest.TestCase):
# - Helpers ---------------------------------------------------------------
def executes(self, commands):
"""Assert that calling the key handlers `handle` method with all
the keys corresponding to the commands in `commands` calls the
handler for that command."""
for command in commands:
handler = commands[command]
key = self.key(command)
self.key_handler.handle(key)
self.failUnless(handler.called)
def does_not_execute(self, commands):
"""Assert that calling the key handlers `handle` method with all
the keys corresponding to the commands in `commands` DOES NOT call the
handler for that command."""
for command in commands:
handler = commands[command]
key = self.key(command)
self.key_handler.handle(key)
self.failIf(handler.called)
def key(self, command):
key, _ = configuration.key_bindings[command]
return key
# - Tests -----------------------------------------------------------------
def setUp(self):
self.controller = Mock(Controller)
self.key_handler = InputHandler(self.controller)
return_false = Mock(return_value=False)
self.controller.is_in_info_mode = return_false
self.controller.is_in_timeline_mode = return_false
self.controller.is_in_help_mode = return_false
self.controller.is_in_user_info_mode = return_false
self.controller.is_in_editor_mode = return_false
def test_info_mode(self):
self.controller.is_in_info_mode = Mock(return_value=True)
# execute
self.executes(self.key_handler.TURSES_COMMANDS)
self.executes(self.key_handler.TIMELINE_COMMANDS)
# don't execute
self.does_not_execute(self.key_handler.MOTION_COMMANDS)
self.does_not_execute(self.key_handler.BUFFER_COMMANDS)
self.does_not_execute(self.key_handler.TWITTER_COMMANDS)
self.does_not_execute(self.key_handler.EXTERNAL_PROGRAM_COMMANDS)
def test_timeline_mode(self):
self.controller.is_in_timeline_mode = Mock(return_value=True)
self.executes(self.key_handler.TURSES_COMMANDS)
self.executes(self.key_handler.MOTION_COMMANDS)
self.executes(self.key_handler.BUFFER_COMMANDS)
self.executes(self.key_handler.TIMELINE_COMMANDS)
self.executes(self.key_handler.TWITTER_COMMANDS)
self.executes(self.key_handler.EXTERNAL_PROGRAM_COMMANDS)
def test_help_mode(self):
self.controller.is_in_help_mode = Mock(return_value=True)
# execute
self.executes(self.key_handler.TURSES_COMMANDS)
self.executes(self.key_handler.MOTION_COMMANDS)
# don't execute
self.does_not_execute(self.key_handler.TIMELINE_COMMANDS)
self.does_not_execute(self.key_handler.BUFFER_COMMANDS)
self.does_not_execute(self.key_handler.TWITTER_COMMANDS)
self.does_not_execute(self.key_handler.EXTERNAL_PROGRAM_COMMANDS)
def test_editor_mode(self):
self.controller.is_in_editor_mode = Mock(return_value=True)
self.does_not_execute(self.key_handler.TURSES_COMMANDS)
self.does_not_execute(self.key_handler.MOTION_COMMANDS)
self.does_not_execute(self.key_handler.TIMELINE_COMMANDS)
self.does_not_execute(self.key_handler.BUFFER_COMMANDS)
self.does_not_execute(self.key_handler.TWITTER_COMMANDS)
self.does_not_execute(self.key_handler.EXTERNAL_PROGRAM_COMMANDS)
for key in "ABCDEFGHIJKLMNÑOPQRSTUVWXYZabcdefghijklmnñopqrstuvwxyz":
self.key_handler.handle(key)
self.controller.forward_to_editor.assert_called_with(key)
class ControllerTest(unittest.TestCase):
def setUp(self):
self.timelines = TimelineList()
self.controller = Controller(ui=Mock(),
api=MockApi('foo', 'bar'),
timelines=self.timelines)
def test_append_home_timeline(self):
self.controller.append_home_timeline()
appended_timeline = self.timelines[-1]
self.assertTrue(is_home_timeline(appended_timeline))
def test_append_user_timeline(self):
user = 'dialelo'
self.controller.append_user_timeline(user)
appended_timeline = self.timelines[-1]
self.assertTrue(is_user_timeline(appended_timeline))
self.assertEqual(appended_timeline._kwargs, {'screen_name': user})
def test_own_tweets_timeline(self):
self.controller.append_own_tweets_timeline()
appended_timeline = self.timelines[-1]
self.assertTrue(is_own_timeline(appended_timeline))
def test_mentions_timeline(self):
self.controller.append_mentions_timeline()
appended_timeline = self.timelines[-1]
self.assertTrue(is_mentions_timeline(appended_timeline))
def test_direct_messages_timeline(self):
self.controller.append_direct_messages_timeline()
appended_timeline = self.timelines[-1]
self.assertTrue(is_messages_timeline(appended_timeline))
def test_thread_timeline(self):
active_timeline = self.controller.timelines.active
active_timeline.add_status(create_status())
# make sure that there is at least one status in the active timeline
self.assertTrue(active_timeline.active)
self.controller.append_thread_timeline()
appended_timeline = self.timelines[-1]
self.assertTrue(is_thread_timeline(appended_timeline))
# TODO: test `append_search_timeline`
# TODO: test `append_retweets_of_me_timeline`
if __name__ == '__main__':
unittest.main()
|
TakashiSasaki/ns-3-nat
|
refs/heads/master
|
examples/udp/examples-to-run.py
|
199
|
#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# A list of C++ examples to run in order to ensure that they remain
# buildable and runnable over time. Each tuple in the list contains
#
# (example_name, do_run, do_valgrind_run).
#
# See test.py for more information.
cpp_examples = [
("udp-echo", "True", "True"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time. Each tuple in the list contains
#
# (example_name, do_run).
#
# See test.py for more information.
python_examples = []
|
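The comments in the file above spell out the tuple formats but `python_examples` is left empty; a hypothetical populated entry, following the `(example_name, do_run)` convention they describe (the script name is illustrative only):

```python
# Hypothetical entry for the python_examples list described above.
python_examples = [
    ("udp-echo.py", "True"),
]
```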
yunqu/PYNQ
|
refs/heads/master
|
pynq/lib/pmod/pmod_grove_imu.py
|
4
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
from . import Pmod
from . import PMOD_GROVE_G3
from . import PMOD_GROVE_G4
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
PMOD_GROVE_IMU_PROGRAM = "pmod_grove_imu.bin"
CONFIG_IOP_SWITCH = 0x1
GET_ACCL_DATA = 0x3
GET_GYRO_DATA = 0x5
GET_COMPASS_DATA = 0x7
GET_TEMPERATURE = 0xB
GET_PRESSURE = 0xD
RESET = 0xF
def _reg2float(reg):
"""Converts 32-bit register value to floats in Python.
Parameters
----------
reg: int
A 32-bit register value read from the mailbox.
Returns
-------
float
A float number translated from the register value.
"""
if reg == 0:
return 0.0
sign = (reg & 0x80000000) >> 31 & 0x01
exp = ((reg & 0x7f800000) >> 23) - 127
if exp == 0:
man = (reg & 0x007fffff) / pow(2, 23)
else:
man = 1 + (reg & 0x007fffff) / pow(2, 23)
result = pow(2, exp) * man * ((sign * -2) + 1)
return float("{0:.2f}".format(result))
def _reg2int(reg):
"""Converts 32-bit register value to signed integer in Python.
Parameters
----------
reg: int
A 32-bit register value read from the mailbox.
Returns
-------
int
A signed integer translated from the register value.
"""
result = -(reg >> 31 & 0x1) * (1 << 31)
for i in range(31):
result += (reg >> i & 0x1) * (1 << i)
return result
class Grove_IMU(object):
"""This class controls the Grove IIC IMU.
Grove IMU 10DOF is a combination of grove IMU 9DOF (MPU9250) and grove
barometer sensor (BMP180). MPU-9250 is a 9-axis motion tracking device
that combines a 3-axis gyroscope, 3-axis accelerometer, 3-axis
magnetometer and a Digital Motion Processor (DMP). BMP180 is a high
precision, low power digital pressure sensor. Hardware version: v1.1.
Attributes
----------
microblaze : Pmod
Microblaze processor instance used by this module.
"""
def __init__(self, mb_info, gr_pin):
"""Return a new instance of an Grove IMU object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
gr_pin: list
A group of pins on pmod-grove adapter.
"""
if gr_pin not in [PMOD_GROVE_G3,
PMOD_GROVE_G4]:
raise ValueError("Group number can only be G3 - G4.")
self.microblaze = Pmod(mb_info, PMOD_GROVE_IMU_PROGRAM)
self.microblaze.write_mailbox(0, gr_pin)
self.microblaze.write_blocking_command(CONFIG_IOP_SWITCH)
self.reset()
def reset(self):
"""Reset all the sensors on the grove IMU.
Returns
-------
None
"""
self.microblaze.write_blocking_command(RESET)
def get_accl(self):
"""Get the data from the accelerometer.
Returns
-------
list
A list of the acceleration data along X-axis, Y-axis, and Z-axis.
"""
self.microblaze.write_blocking_command(GET_ACCL_DATA)
data = self.microblaze.read_mailbox(0, 3)
[ax, ay, az] = [_reg2int(i) for i in data]
return [float("{0:.2f}".format(ax/16384)),
float("{0:.2f}".format(ay/16384)),
float("{0:.2f}".format(az/16384))]
def get_gyro(self):
"""Get the data from the gyroscope.
Returns
-------
list
A list of the gyro data along X-axis, Y-axis, and Z-axis.
"""
self.microblaze.write_blocking_command(GET_GYRO_DATA)
data = self.microblaze.read_mailbox(0, 3)
[gx, gy, gz] = [_reg2int(i) for i in data]
return [float("{0:.2f}".format(gx*250/32768)),
float("{0:.2f}".format(gy*250/32768)),
float("{0:.2f}".format(gz*250/32768))]
def get_compass(self):
"""Get the data from the magnetometer.
Returns
-------
list
A list of the compass data along X-axis, Y-axis, and Z-axis.
"""
self.microblaze.write_blocking_command(GET_COMPASS_DATA)
data = self.microblaze.read_mailbox(0, 3)
[mx, my, mz] = [_reg2int(i) for i in data]
return [float("{0:.2f}".format(mx*1200/4096)),
float("{0:.2f}".format(my*1200/4096)),
float("{0:.2f}".format(mz*1200/4096))]
def get_heading(self):
"""Get the value of the heading.
Returns
-------
float
The angle deviated from the X-axis, toward the positive Y-axis.
"""
[mx, my, _] = self.get_compass()
heading = 180 * math.atan2(my, mx) / math.pi
if heading < 0:
heading += 360
return float("{0:.2f}".format(heading))
def get_tilt_heading(self):
"""Get the value of the tilt heading.
Returns
-------
float
The tilt heading value.
"""
[ax, ay, _] = self.get_accl()
[mx, my, mz] = self.get_compass()
try:
pitch = math.asin(-ax)
roll = math.asin(ay / math.cos(pitch))
except ZeroDivisionError:
raise RuntimeError("Value out of range or device not connected.")
xh = mx * math.cos(pitch) + mz * math.sin(pitch)
yh = mx * math.sin(roll) * math.sin(pitch) + \
my * math.cos(roll) - mz * math.sin(roll) * math.cos(pitch)
_ = -mx * math.cos(roll) * math.sin(pitch) + \
my * math.sin(roll) + mz * math.cos(roll) * math.cos(pitch)
tilt_heading = 180 * math.atan2(yh, xh) / math.pi
if yh < 0:
tilt_heading += 360
return float("{0:.2f}".format(tilt_heading))
def get_temperature(self):
"""Get the current temperature in degree C.
Returns
-------
float
The temperature value.
"""
self.microblaze.write_blocking_command(GET_TEMPERATURE)
value = self.microblaze.read_mailbox(0)
return _reg2float(value)
def get_pressure(self):
"""Get the current pressure in Pa.
Returns
-------
float
The pressure value.
"""
self.microblaze.write_blocking_command(GET_PRESSURE)
value = self.microblaze.read_mailbox(0)
return _reg2float(value)
def get_atm(self):
"""Get the current pressure in relative atmosphere.
Returns
-------
float
The related atmosphere.
"""
return float("{0:.2f}".format(self.get_pressure()/101325))
def get_altitude(self):
"""Get the current altitude.
Returns
-------
float
The altitude value.
"""
pressure = self.get_pressure()
a = pressure/101325
b = 1/5.255
c = 1-pow(a, b)
altitude = 44300 * c
return float("{0:.2f}".format(altitude))
|
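A short usage sketch for the `Grove_IMU` class above. The overlay and attribute names (`BaseOverlay("base.bit")`, `base.PMODA`) are assumptions drawn from typical PYNQ setups rather than from this file, and the snippet only runs on real hardware; the `_reg2int` figures in the trailing comment follow from the helper's two's-complement arithmetic shown above.

```python
# Usage sketch for Grove_IMU (assumptions noted in the lead-in).
from pynq.overlays.base import BaseOverlay
from pynq.lib.pmod import Grove_IMU, PMOD_GROVE_G4

base = BaseOverlay("base.bit")
imu = Grove_IMU(base.PMODA, PMOD_GROVE_G4)   # gr_pin must be G3 or G4 per __init__

print(imu.get_accl())                        # [ax, ay, az] in g (raw / 16384)
print(imu.get_heading())                     # degrees from X-axis toward +Y
print(imu.get_temperature(), imu.get_pressure())

# Without hardware, the register helpers can still be sanity-checked, e.g.
#   _reg2int(0xFFFFFFFF) == -1    and    _reg2int(0x00000010) == 16
```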
prometheanfire/portage
|
refs/heads/master
|
pym/_emerge/EbuildBuild.py
|
2
|
# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.EbuildExecuter import EbuildExecuter
from _emerge.EbuildPhase import EbuildPhase
from _emerge.EbuildBinpkg import EbuildBinpkg
from _emerge.EbuildFetcher import EbuildFetcher
from _emerge.CompositeTask import CompositeTask
from _emerge.EbuildMerge import EbuildMerge
from _emerge.EbuildFetchonly import EbuildFetchonly
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
from _emerge.TaskSequence import TaskSequence
from portage.util import writemsg
import portage
from portage import os
from portage.output import colorize
from portage.package.ebuild.digestcheck import digestcheck
from portage.package.ebuild.digestgen import digestgen
from portage.package.ebuild.doebuild import _check_temp_dir
from portage.package.ebuild._spawn_nofetch import spawn_nofetch
class EbuildBuild(CompositeTask):
__slots__ = ("args_set", "config_pool", "find_blockers",
"ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
"prefetcher", "settings", "world_atom") + \
("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
def _start(self):
pkg = self.pkg
settings = self.settings
if not self.opts.fetchonly:
rval = _check_temp_dir(settings)
if rval != os.EX_OK:
self.returncode = rval
self._current_task = None
self._async_wait()
return
root_config = pkg.root_config
tree = "porttree"
self._tree = tree
portdb = root_config.trees[tree].dbapi
settings.setcpv(pkg)
settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
if self.opts.buildpkgonly:
settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
else:
settings.configdict["pkg"]["MERGE_TYPE"] = "source"
ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
self._ebuild_path = ebuild_path
portage.doebuild_environment(ebuild_path, 'setup',
settings=self.settings, db=portdb)
# Check the manifest here since with --keep-going mode it's
# currently possible to get this far with a broken manifest.
if not self._check_manifest():
self.returncode = 1
self._current_task = None
self._async_wait()
return
prefetcher = self.prefetcher
if prefetcher is None:
pass
elif prefetcher.isAlive() and \
prefetcher.poll() is None:
waiting_msg = "Fetching files " + \
"in the background. " + \
"To view fetch progress, run `tail -f " + \
"/var/log/emerge-fetch.log` in another " + \
"terminal."
msg_prefix = colorize("GOOD", " * ")
from textwrap import wrap
waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
for line in wrap(waiting_msg, 65))
if not self.background:
writemsg(waiting_msg, noiselevel=-1)
self._current_task = prefetcher
prefetcher.addExitListener(self._prefetch_exit)
return
self._prefetch_exit(prefetcher)
def _check_manifest(self):
success = True
settings = self.settings
if 'strict' in settings.features and \
'digest' not in settings.features:
settings['O'] = os.path.dirname(self._ebuild_path)
quiet_setting = settings.get('PORTAGE_QUIET')
settings['PORTAGE_QUIET'] = '1'
try:
success = digestcheck([], settings, strict=True)
finally:
if quiet_setting:
settings['PORTAGE_QUIET'] = quiet_setting
else:
del settings['PORTAGE_QUIET']
return success
def _prefetch_exit(self, prefetcher):
if self._was_cancelled():
self.wait()
return
opts = self.opts
pkg = self.pkg
settings = self.settings
if opts.fetchonly:
if opts.pretend:
fetcher = EbuildFetchonly(
fetch_all=opts.fetch_all_uri,
pkg=pkg, pretend=opts.pretend,
settings=settings)
retval = fetcher.execute()
self.returncode = retval
self.wait()
return
else:
fetcher = EbuildFetcher(
config_pool=self.config_pool,
ebuild_path=self._ebuild_path,
fetchall=self.opts.fetch_all_uri,
fetchonly=self.opts.fetchonly,
background=False,
logfile=None,
pkg=self.pkg,
scheduler=self.scheduler)
self._start_task(fetcher, self._fetchonly_exit)
return
self._build_dir = EbuildBuildDir(
scheduler=self.scheduler, settings=settings)
self._build_dir.lock()
# Cleaning needs to happen before fetch, since the build dir
# is used for log handling.
msg = " === (%s of %s) Cleaning (%s::%s)" % \
(self.pkg_count.curval, self.pkg_count.maxval,
self.pkg.cpv, self._ebuild_path)
short_msg = "emerge: (%s of %s) %s Clean" % \
(self.pkg_count.curval, self.pkg_count.maxval, self.pkg.cpv)
self.logger.log(msg, short_msg=short_msg)
pre_clean_phase = EbuildPhase(background=self.background,
phase='clean', scheduler=self.scheduler, settings=self.settings)
self._start_task(pre_clean_phase, self._pre_clean_exit)
def _fetchonly_exit(self, fetcher):
self._final_exit(fetcher)
if self.returncode != os.EX_OK:
portdb = self.pkg.root_config.trees[self._tree].dbapi
spawn_nofetch(portdb, self._ebuild_path, settings=self.settings)
elif 'digest' in self.settings.features:
if not digestgen(mysettings=self.settings,
myportdb=self.pkg.root_config.trees[self._tree].dbapi):
self.returncode = 1
self.wait()
def _pre_clean_exit(self, pre_clean_phase):
if self._default_exit(pre_clean_phase) != os.EX_OK:
self._unlock_builddir()
self.wait()
return
# for log handling
portage.prepare_build_dirs(self.pkg.root, self.settings, 1)
fetcher = EbuildFetcher(config_pool=self.config_pool,
ebuild_path=self._ebuild_path,
fetchall=self.opts.fetch_all_uri,
fetchonly=self.opts.fetchonly,
background=self.background,
logfile=self.settings.get('PORTAGE_LOG_FILE'),
pkg=self.pkg, scheduler=self.scheduler)
try:
already_fetched = fetcher.already_fetched(self.settings)
except portage.exception.InvalidDependString as e:
msg_lines = []
msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \
(self.pkg.cpv, e)
msg_lines.append(msg)
fetcher._eerror(msg_lines)
portage.elog.elog_process(self.pkg.cpv, self.settings)
self.returncode = 1
self._current_task = None
self._unlock_builddir()
self.wait()
return
if already_fetched:
# This case is optimized to skip the fetch queue.
fetcher = None
self._fetch_exit(fetcher)
return
# Allow the Scheduler's fetch queue to control the
# number of concurrent fetchers.
fetcher.addExitListener(self._fetch_exit)
self._task_queued(fetcher)
self.scheduler.fetch.schedule(fetcher)
def _fetch_exit(self, fetcher):
if fetcher is not None and \
self._default_exit(fetcher) != os.EX_OK:
self._fetch_failed()
return
# discard successful fetch log
self._build_dir.clean_log()
pkg = self.pkg
logger = self.logger
opts = self.opts
pkg_count = self.pkg_count
scheduler = self.scheduler
settings = self.settings
features = settings.features
ebuild_path = self._ebuild_path
system_set = pkg.root_config.sets["system"]
#buildsyspkg: Check if we need to _force_ binary package creation
self._issyspkg = "buildsyspkg" in features and \
system_set.findAtomForPackage(pkg) and \
"buildpkg" not in features and \
opts.buildpkg != 'n'
if ("buildpkg" in features or self._issyspkg) \
and not self.opts.buildpkg_exclude.findAtomForPackage(pkg):
self._buildpkg = True
msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
short_msg = "emerge: (%s of %s) %s Compile" % \
(pkg_count.curval, pkg_count.maxval, pkg.cpv)
logger.log(msg, short_msg=short_msg)
else:
msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
short_msg = "emerge: (%s of %s) %s Compile" % \
(pkg_count.curval, pkg_count.maxval, pkg.cpv)
logger.log(msg, short_msg=short_msg)
build = EbuildExecuter(background=self.background, pkg=pkg,
scheduler=scheduler, settings=settings)
self._start_task(build, self._build_exit)
def _fetch_failed(self):
# We only call the pkg_nofetch phase if either RESTRICT=fetch
# is set or the package has explicitly overridden the default
# pkg_nofetch implementation. This allows specialized messages
# to be displayed for problematic packages even though they do
# not set RESTRICT=fetch (bug #336499).
if 'fetch' not in self.pkg.restrict and \
'nofetch' not in self.pkg.defined_phases:
self._unlock_builddir()
self.wait()
return
self.returncode = None
nofetch_phase = EbuildPhase(background=self.background,
phase='nofetch', scheduler=self.scheduler, settings=self.settings)
self._start_task(nofetch_phase, self._nofetch_exit)
def _nofetch_exit(self, nofetch_phase):
self._final_exit(nofetch_phase)
self._unlock_builddir()
self.returncode = 1
self.wait()
def _unlock_builddir(self):
portage.elog.elog_process(self.pkg.cpv, self.settings)
self._build_dir.unlock()
def _build_exit(self, build):
if self._default_exit(build) != os.EX_OK:
self._unlock_builddir()
self.wait()
return
buildpkg = self._buildpkg
if not buildpkg:
self._final_exit(build)
self.wait()
return
if self._issyspkg:
msg = ">>> This is a system package, " + \
"let's pack a rescue tarball.\n"
self.scheduler.output(msg,
log_path=self.settings.get("PORTAGE_LOG_FILE"))
binpkg_tasks = TaskSequence()
requested_binpkg_formats = self.settings.get("PORTAGE_BINPKG_FORMAT", "tar").split()
for pkg_fmt in portage.const.SUPPORTED_BINPKG_FORMATS:
if pkg_fmt in requested_binpkg_formats:
if pkg_fmt == "rpm":
binpkg_tasks.add(EbuildPhase(background=self.background,
phase="rpm", scheduler=self.scheduler,
settings=self.settings))
else:
binpkg_tasks.add(EbuildBinpkg(background=self.background,
pkg=self.pkg, scheduler=self.scheduler,
settings=self.settings))
self._start_task(binpkg_tasks, self._buildpkg_exit)
def _buildpkg_exit(self, packager):
"""
Release the build dir lock when there is a failure or
when in buildpkgonly mode. Otherwise, the lock will
be released when merge() is called.
"""
if self._default_exit(packager) != os.EX_OK:
self._unlock_builddir()
self.wait()
return
if self.opts.buildpkgonly:
phase = 'success_hooks'
success_hooks = MiscFunctionsProcess(
background=self.background,
commands=[phase], phase=phase,
scheduler=self.scheduler, settings=self.settings)
self._start_task(success_hooks,
self._buildpkgonly_success_hook_exit)
return
# Continue holding the builddir lock until
# after the package has been installed.
self._current_task = None
self.returncode = packager.returncode
self.wait()
def _buildpkgonly_success_hook_exit(self, success_hooks):
self._default_exit(success_hooks)
self.returncode = None
# Need to call "clean" phase for buildpkgonly mode
portage.elog.elog_process(self.pkg.cpv, self.settings)
phase = 'clean'
clean_phase = EbuildPhase(background=self.background,
phase=phase, scheduler=self.scheduler, settings=self.settings)
self._start_task(clean_phase, self._clean_exit)
def _clean_exit(self, clean_phase):
if self._final_exit(clean_phase) != os.EX_OK or \
self.opts.buildpkgonly:
self._unlock_builddir()
self.wait()
def create_install_task(self):
"""
Install the package and then clean up and release locks.
Only call this after the build has completed successfully
and neither fetchonly nor buildpkgonly mode are enabled.
"""
ldpath_mtimes = self.ldpath_mtimes
logger = self.logger
pkg = self.pkg
pkg_count = self.pkg_count
settings = self.settings
world_atom = self.world_atom
ebuild_path = self._ebuild_path
tree = self._tree
task = EbuildMerge(exit_hook=self._install_exit,
find_blockers=self.find_blockers,
ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
pkg_count=pkg_count, pkg_path=ebuild_path,
scheduler=self.scheduler,
settings=settings, tree=tree, world_atom=world_atom)
msg = " === (%s of %s) Merging (%s::%s)" % \
(pkg_count.curval, pkg_count.maxval,
pkg.cpv, ebuild_path)
short_msg = "emerge: (%s of %s) %s Merge" % \
(pkg_count.curval, pkg_count.maxval, pkg.cpv)
logger.log(msg, short_msg=short_msg)
return task
def _install_exit(self, task):
self._unlock_builddir()
|
tadashi-aikawa/gemini
|
refs/heads/master
|
tests/addons/reqs2reqs/test_rename.py
|
1
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pytest
from owlmixin.util import load_yaml
from jumeaux.addons.reqs2reqs.rename import Executor
from jumeaux.models import Reqs2ReqsAddOnPayload
RENAME_WITH_CONDITION = (
"Rename requests with a condition",
"""
conditions:
- name: renamed
when: "'target' in path"
""",
[
{"name": "name1", "method": "GET", "path": "target", "headers": {}, "qs": {}},
{"name": "name2", "method": "GET", "path": "TARGET", "headers": {}, "qs": {}},
{"name": "name3", "method": "GET", "path": "This is target, too", "headers": {}, "qs": {}},
],
[
{
"name": "renamed",
"method": "GET",
"path": "target",
"headers": {},
"qs": {},
"url_encoding": "utf-8",
},
{
"name": "name2",
"method": "GET",
"path": "TARGET",
"headers": {},
"qs": {},
"url_encoding": "utf-8",
},
{
"name": "renamed",
"method": "GET",
"path": "This is target, too",
"headers": {},
"qs": {},
"url_encoding": "utf-8",
},
],
)
RENAME_WITH_CONDITIONS = (
"Rename requests with a conditions",
"""
conditions:
- name: "Over 100 ({{ name }}: {{ qs.id.0 }})"
when: "qs.id.0|int > 100"
- name: "Over 10 ({{ name }}: {{ qs.id.0 }})"
when: "qs.id.0|int > 10"
""",
[
{"name": "name1", "method": "GET", "path": "target1", "headers": {}, "qs": {"id": ["500"]}},
{"name": "name2", "method": "GET", "path": "target2", "headers": {}, "qs": {"id": ["50"]}},
{"name": "name3", "method": "GET", "path": "target3", "headers": {}, "qs": {"id": ["5"]}},
],
[
{
"name": "Over 100 (name1: 500)",
"method": "GET",
"path": "target1",
"headers": {},
"qs": {"id": ["500"]},
"url_encoding": "utf-8",
},
{
"name": "Over 10 (name2: 50)",
"method": "GET",
"path": "target2",
"headers": {},
"qs": {"id": ["50"]},
"url_encoding": "utf-8",
},
{
"name": "name3",
"method": "GET",
"path": "target3",
"headers": {},
"qs": {"id": ["5"]},
"url_encoding": "utf-8",
},
],
)
RENAME_ALL = (
"Rename all",
"""
conditions:
- name: target1
when: "path == 'target1'"
- name: END
""",
[
{"name": "name1", "method": "GET", "path": "target1", "headers": {}, "qs": {}},
{"name": "name2", "method": "GET", "path": "target2", "headers": {}, "qs": {}},
{"name": "name3", "method": "GET", "path": "target3", "headers": {}, "qs": {}},
],
[
{
"name": "target1",
"method": "GET",
"path": "target1",
"headers": {},
"qs": {},
"url_encoding": "utf-8",
},
{
"name": "END",
"method": "GET",
"path": "target2",
"headers": {},
"qs": {},
"url_encoding": "utf-8",
},
{
"name": "END",
"method": "GET",
"path": "target3",
"headers": {},
"qs": {},
"url_encoding": "utf-8",
},
],
)
class TestExec:
@pytest.mark.parametrize(
"title, config_yml, requests, expected_result",
[RENAME_WITH_CONDITION, RENAME_WITH_CONDITIONS, RENAME_ALL],
)
def test_rename(self, title, config_yml, requests, expected_result):
payload: Reqs2ReqsAddOnPayload = Reqs2ReqsAddOnPayload.from_dict({"requests": requests})
actual: Reqs2ReqsAddOnPayload = Executor(load_yaml(config_yml)).exec(payload, None)
assert expected_result == actual.requests.to_dicts()
|
pyecs/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/websockets/handlers/echo-query_v13_wsh.py
|
266
|
#!/usr/bin/python
from mod_pywebsocket import msgutil, util
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
while True:
msgutil.send_message(request, request.unparsed_uri.split('?')[1] or '')
return
|
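The handler above echoes back everything after the `?` in the request URI; a client-side sketch (assumptions: the third-party `websocket-client` package, a running wptserve instance, and a placeholder host/port/URL shape, since the mapping from `echo-query_v13_wsh.py` to a URL is configured by the test server):

```python
# Client-side sketch for the echo-query handler above (assumptions in lead-in).
from websocket import create_connection   # pip package: websocket-client

ws = create_connection("ws://localhost:8888/echo-query_v13?foo=bar&x=1")
print(ws.recv())   # expected: "foo=bar&x=1"
ws.close()
```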
thibdct/bibliotheque-toulouse-python
|
refs/heads/master
|
bibliothequetoulouse/__init__.py
|
1
|
# -*- coding: utf-8 -*-
r"""Les bibliothèques de Toulouse <http://www.bibliotheque.toulouse.fr/> proposent un très vaste choix de livres, DVD et autres créations.
:mod:`bibliotheque-toulouse` facilite la récupération des informations du catalogue (exemplaires disponibles, emplacement...).
Recherche des exemplaires disponibles du roman Le meilleur des mondes, d'Aldous Huxley ::
>>> import bibliotheque-toulouse as bib
>>> exemplaires_trouves = bib.rechercher("Le meilleur des mondes", "Aldous Huxley")
"""
__version__ = '0.1.8'
from builtins import object
from bibliothequetoulouse.client import Client
import json
import sys
# Allows Sphinx to pick up these elements for the documentation
__all__ = ['Client']
if (sys.version_info > (3, 0)):
_PYTHON_3 = True
else:
_PYTHON_3 = False
class Liste_resultats(object):
""" Classe regroupant un liste de résultats d'une recherche dans le catalogue """
def __init__(self, liste_resultats):
self.liste_resultats = liste_resultats
def __len__(self): # Lets callers ask for the number of results (e.g. len(liste_resultats))
return len(self.liste_resultats)
def __getitem__(self, key): # Lets callers index the object like a list (e.g. liste_resultats[1])
return Resultat(self.liste_resultats[key])
def __repr__(self): # Display method for the object (here, indented JSON output)
return _pretty_print_json(self.liste_resultats)
class Resultat(object):
""" Classe représentant un résultat de recherche dans le catalogue """
def __init__(self, resultat):
self.resultat = resultat
def __getattr__(self, key): # Retrieves the value of an attribute (e.g. resultat.titre)
return self.resultat.get(key)
def __getitem__(self, key): # Retrieves the value of an attribute dictionary-style (e.g. resultat["titre"])
return self.resultat.get(key)
def __repr__(self): # Display method for the object (here, indented JSON output)
return _pretty_print_json(self.resultat)
def _pretty_print_json(python_object):
""" Renvoie une chaine JSON indentée """
chaine_json = json.dumps(python_object, ensure_ascii=False, indent=4, sort_keys=True).strip()
if _PYTHON_3 :
return chaine_json
else :
return chaine_json.encode('utf-8')
def rechercher(titre="", auteur="", pertinence_minimum=0.7, bibli_souhaitees=[], dispo_uniquement=False, sauf_braille=True):
if (auteur == None) : auteur = ""
if (titre == None) : titre = ""
bib = Client()
liste_resultats = bib.rechercher(titre, auteur)
# Filters
liste_resultats_filtree = [x for x in liste_resultats if x['pertinence'] > pertinence_minimum]
if len(bibli_souhaitees) > 0: # Do not filter on libraries when none are specified
liste_resultats_filtree = [x for x in liste_resultats_filtree if x['bibliotheque'] in bibli_souhaitees]
if dispo_uniquement:
liste_resultats_filtree = [x for x in liste_resultats_filtree if x['dispo'] == True]
if sauf_braille:
liste_resultats_filtree = [x for x in liste_resultats_filtree if u'braille' not in x['materiel'].lower()]
return Liste_resultats(liste_resultats_filtree)
|
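The module docstring above shows a bare search; a sketch that also uses the filter parameters exposed by `rechercher` (the branch names are hypothetical examples, not taken from the catalogue, and the query needs network access to the Toulouse catalogue):

```python
# Filtered-search sketch for bibliothequetoulouse (assumptions in lead-in).
import bibliothequetoulouse as bib

resultats = bib.rechercher(
    "Le meilleur des mondes", "Aldous Huxley",
    pertinence_minimum=0.8,
    bibli_souhaitees=["Cabanis", "Croix-Daurade"],   # hypothetical branch names
    dispo_uniquement=True,
)
print(len(resultats))     # Liste_resultats supports len()
if len(resultats):
    print(resultats[0])   # and indexing, yielding a pretty-printed Resultat
```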
nickgu/kvdict2
|
refs/heads/master
|
script/benchmark.py
|
1
|
#! /bin/env python
# encoding=utf-8
# gusimiu@baidu.com
#
# Performance benchmark for kvdict
#
import os
import sys
import time
import random
import kvdict2
def random_string(n):
ret = ('%1d' % random.randint(0, 9)) * n  # randint is inclusive; 0-9 keeps the string exactly n chars
return ret
def test_file(d):
d.load(file_name)
tm_begin = time.time()
for k in key_list:
s = d.find(k)
during = time.time() - tm_begin
print >> sys.stderr, "SEARCHING_KEYS : %d" % len(key_list)
print >> sys.stderr, "USING_TIME : %.3f(s)" % during
print >> sys.stderr, "AVERAGE_TIME : %.3f(s)" % (during / len(key_list))
print >> sys.stderr, "QPS : %.1f qps" % (len(key_list) / during)
if __name__=='__main__':
test_num = 1000000
key_length = 8
value_length = 128
print >> sys.stderr, "preparing data.."
file_name = 'benchmark_data.txt'
os.system('rm -rf %s' % file_name)
key_list = []
f = file(file_name, 'w')
for i in range(test_num):
key = random_string(key_length)
value = random_string(value_length)
f.write('%s\t%s\n' % (key, value))
key_list.append(key)
if i % 100000 == 0:
print >> sys.stderr, "write %d record(s)" % i
f.close()
print >> sys.stderr, "complete preparing."
key_list = sorted(key_list)
d = kvdict2.FileIndexKVDict()
print >> sys.stderr, "KEY_LENGTH : %d" % key_length
print >> sys.stderr, "VALUE_LENGTH : %d" % value_length
print >> sys.stderr, "TEST #1 LOAD IN DISK:"
test_file(d)
print >> sys.stderr, "TEST #2 LOAD IN MEMORY:"
d = kvdict2.KVDict()
test_file(d)
os.system('rm -rf benchmark_data.txt')
|
vicnet/weboob
|
refs/heads/master
|
contrib/plugin.video.videoobmc/default_test.py
|
9
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import resources.lib.test.common_test as common_xbmc
import resources.lib.constants as constants
from resources.lib.actions import actions
print(sys.argv)
if len(sys.argv) < 2:
actions[constants.DISPLAY_MENU]()._do()
else:
params = common_xbmc.parse_params(sys.argv[1])
#print params
action = params.get("action")
if (action):
actions[action]()._do(params)
else:
common_xbmc.display_error(" ARGV Nothing done.. verify params " + repr(params))
|
mschwager/CTFd
|
refs/heads/master
|
CTFd/admin/statistics.py
|
1
|
from flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint
from CTFd.utils import admins_only, is_admin, unix_time, get_config, \
set_config, sendmail, rmdir, create_image, delete_image, run_image, container_status, container_ports, \
container_stop, container_start, get_themes, cache, upload_file
from CTFd.models import db, Teams, Solves, Awards, Containers, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, DatabaseError
admin_statistics = Blueprint('admin_statistics', __name__)
@admin_statistics.route('/admin/graphs')
@admins_only
def admin_graphs():
return render_template('admin/graphs.html')
@admin_statistics.route('/admin/graphs/<graph_type>')
@admins_only
def admin_graph(graph_type):
if graph_type == 'categories':
categories = db.session.query(Challenges.category, db.func.count(Challenges.category)).group_by(Challenges.category).all()
json_data = {'categories': []}
for category, count in categories:
json_data['categories'].append({'category': category, 'count': count})
return jsonify(json_data)
elif graph_type == "solves":
solves_sub = db.session.query(Solves.chalid, db.func.count(Solves.chalid).label('solves_cnt')) \
            .join(Teams, Solves.teamid == Teams.id).filter(Teams.banned == False) \
.group_by(Solves.chalid).subquery()
solves = db.session.query(solves_sub.columns.chalid, solves_sub.columns.solves_cnt, Challenges.name) \
.join(Challenges, solves_sub.columns.chalid == Challenges.id).all()
json_data = {}
for chal, count, name in solves:
json_data[name] = count
return jsonify(json_data)
@admin_statistics.route('/admin/statistics', methods=['GET'])
@admins_only
def admin_stats():
teams_registered = db.session.query(db.func.count(Teams.id)).first()[0]
wrong_count = db.session.query(db.func.count(WrongKeys.id)).first()[0]
solve_count = db.session.query(db.func.count(Solves.id)).first()[0]
challenge_count = db.session.query(db.func.count(Challenges.id)).first()[0]
solves_sub = db.session.query(Solves.chalid, db.func.count(Solves.chalid).label('solves_cnt')) \
        .join(Teams, Solves.teamid == Teams.id).filter(Teams.banned == False) \
.group_by(Solves.chalid).subquery()
solves = db.session.query(solves_sub.columns.chalid, solves_sub.columns.solves_cnt, Challenges.name) \
.join(Challenges, solves_sub.columns.chalid == Challenges.id).all()
solve_data = {}
for chal, count, name in solves:
solve_data[name] = count
most_solved = None
least_solved = None
if len(solve_data):
most_solved = max(solve_data, key=solve_data.get)
least_solved = min(solve_data, key=solve_data.get)
db.session.expunge_all()
db.session.commit()
db.session.close()
return render_template('admin/statistics.html', team_count=teams_registered,
wrong_count=wrong_count,
solve_count=solve_count,
challenge_count=challenge_count,
solve_data=solve_data,
most_solved=most_solved,
least_solved=least_solved)
@admin_statistics.route('/admin/wrong_keys', defaults={'page': '1'}, methods=['GET'])
@admin_statistics.route('/admin/wrong_keys/<int:page>', methods=['GET'])
@admins_only
def admin_wrong_key(page):
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
wrong_keys = WrongKeys.query.add_columns(WrongKeys.id, WrongKeys.chalid, WrongKeys.flag, WrongKeys.teamid, WrongKeys.date,
Challenges.name.label('chal_name'), Teams.name.label('team_name')) \
.join(Challenges) \
.join(Teams) \
.order_by(WrongKeys.date.desc()) \
.slice(page_start, page_end) \
.all()
wrong_count = db.session.query(db.func.count(WrongKeys.id)).first()[0]
pages = int(wrong_count / results_per_page) + (wrong_count % results_per_page > 0)
return render_template('admin/wrong_keys.html', wrong_keys=wrong_keys, pages=pages, curr_page=page)
@admin_statistics.route('/admin/correct_keys', defaults={'page': '1'}, methods=['GET'])
@admin_statistics.route('/admin/correct_keys/<int:page>', methods=['GET'])
@admins_only
def admin_correct_key(page):
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * (page - 1)
page_end = results_per_page * (page - 1) + results_per_page
solves = Solves.query.add_columns(Solves.id, Solves.chalid, Solves.teamid, Solves.date, Solves.flag,
Challenges.name.label('chal_name'), Teams.name.label('team_name')) \
.join(Challenges) \
.join(Teams) \
.order_by(Solves.date.desc()) \
.slice(page_start, page_end) \
.all()
solve_count = db.session.query(db.func.count(Solves.id)).first()[0]
pages = int(solve_count / results_per_page) + (solve_count % results_per_page > 0)
return render_template('admin/correct_keys.html', solves=solves, pages=pages, curr_page=page)
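# Note: the `pages` computation above is a ceiling division written without math.ceil;
# e.g. with wrong_count = 101 and results_per_page = 50:
#     int(101 / 50) + (101 % 50 > 0)  ->  2 + True  ->  3 pages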
|
stephanie-wang/ray
|
refs/heads/master
|
python/ray/experimental/actor_pool.py
|
1
|
import ray
class ActorPool:
"""Utility class to operate on a fixed pool of actors.
Arguments:
actors (list): List of Ray actor handles to use in this pool.
Examples:
>>> a1, a2 = Actor.remote(), Actor.remote()
>>> pool = ActorPool([a1, a2])
>>> print(pool.map(lambda a, v: a.double.remote(v), [1, 2, 3, 4]))
[2, 4, 6, 8]
"""
def __init__(self, actors):
# actors to be used
self._idle_actors = list(actors)
# get actor from future
self._future_to_actor = {}
# get future from index
self._index_to_future = {}
# next task to do
self._next_task_index = 0
# next task to return
self._next_return_index = 0
        # pending work to submit once an actor becomes free
self._pending_submits = []
def map(self, fn, values):
"""Apply the given function in parallel over the actors and values.
This returns an ordered iterator that will return results of the map
as they finish. Note that you must iterate over the iterator to force
the computation to finish.
Arguments:
fn (func): Function that takes (actor, value) as argument and
returns an ObjectID computing the result over the value. The
actor will be considered busy until the ObjectID completes.
values (list): List of values that fn(actor, value) should be
applied to.
Returns:
Iterator over results from applying fn to the actors and values.
Examples:
>>> pool = ActorPool(...)
>>> print(pool.map(lambda a, v: a.double.remote(v), [1, 2, 3, 4]))
[2, 4, 6, 8]
"""
for v in values:
self.submit(fn, v)
while self.has_next():
yield self.get_next()
def map_unordered(self, fn, values):
"""Similar to map(), but returning an unordered iterator.
This returns an unordered iterator that will return results of the map
        as they finish. This can be more efficient than map() if some results
take longer to compute than others.
Arguments:
fn (func): Function that takes (actor, value) as argument and
returns an ObjectID computing the result over the value. The
actor will be considered busy until the ObjectID completes.
values (list): List of values that fn(actor, value) should be
applied to.
Returns:
Iterator over results from applying fn to the actors and values.
Examples:
>>> pool = ActorPool(...)
>>> print(pool.map(lambda a, v: a.double.remote(v), [1, 2, 3, 4]))
[6, 2, 4, 8]
"""
for v in values:
self.submit(fn, v)
while self.has_next():
yield self.get_next_unordered()
def submit(self, fn, value):
"""Schedule a single task to run in the pool.
        This has the same argument semantics as map(), but takes a single
value instead of a list of values. The result can be retrieved using
get_next() / get_next_unordered().
Arguments:
fn (func): Function that takes (actor, value) as argument and
returns an ObjectID computing the result over the value. The
actor will be considered busy until the ObjectID completes.
value (object): Value to compute a result for.
Examples:
>>> pool = ActorPool(...)
>>> pool.submit(lambda a, v: a.double.remote(v), 1)
>>> pool.submit(lambda a, v: a.double.remote(v), 2)
>>> print(pool.get_next(), pool.get_next())
2, 4
"""
if self._idle_actors:
actor = self._idle_actors.pop()
future = fn(actor, value)
self._future_to_actor[future] = (self._next_task_index, actor)
self._index_to_future[self._next_task_index] = future
self._next_task_index += 1
else:
self._pending_submits.append((fn, value))
def has_next(self):
"""Returns whether there are any pending results to return.
Returns:
True if there are any pending results not yet returned.
Examples:
>>> pool = ActorPool(...)
>>> pool.submit(lambda a, v: a.double.remote(v), 1)
>>> print(pool.has_next())
True
>>> print(pool.get_next())
2
>>> print(pool.has_next())
False
"""
return bool(self._future_to_actor)
def get_next(self, timeout=None):
"""Returns the next pending result in order.
This returns the next result produced by submit(), blocking for up to
the specified timeout until it is available.
Returns:
The next result.
Raises:
TimeoutError if the timeout is reached.
Examples:
>>> pool = ActorPool(...)
>>> pool.submit(lambda a, v: a.double.remote(v), 1)
>>> print(pool.get_next())
2
"""
if not self.has_next():
raise StopIteration("No more results to get")
if self._next_return_index >= self._next_task_index:
raise ValueError("It is not allowed to call get_next() after "
"get_next_unordered().")
future = self._index_to_future[self._next_return_index]
if timeout is not None:
res, _ = ray.wait([future], timeout=timeout)
if not res:
raise TimeoutError("Timed out waiting for result")
del self._index_to_future[self._next_return_index]
self._next_return_index += 1
i, a = self._future_to_actor.pop(future)
self._return_actor(a)
return ray.get(future)
def get_next_unordered(self, timeout=None):
"""Returns any of the next pending results.
This returns some result produced by submit(), blocking for up to
the specified timeout until it is available. Unlike get_next(), the
        results are not always returned in the same order as submitted, which can
improve performance.
Returns:
The next result.
Raises:
TimeoutError if the timeout is reached.
Examples:
>>> pool = ActorPool(...)
>>> pool.submit(lambda a, v: a.double.remote(v), 1)
>>> pool.submit(lambda a, v: a.double.remote(v), 2)
>>> print(pool.get_next_unordered())
4
>>> print(pool.get_next_unordered())
2
"""
if not self.has_next():
raise StopIteration("No more results to get")
# TODO(ekl) bulk wait for performance
res, _ = ray.wait(
list(self._future_to_actor), num_returns=1, timeout=timeout)
if res:
[future] = res
else:
raise TimeoutError("Timed out waiting for result")
i, a = self._future_to_actor.pop(future)
self._return_actor(a)
del self._index_to_future[i]
self._next_return_index = max(self._next_return_index, i + 1)
return ray.get(future)
def _return_actor(self, actor):
self._idle_actors.append(actor)
if self._pending_submits:
self.submit(*self._pending_submits.pop(0))
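# End-to-end sketch (illustrative; assumes an initialized Ray runtime — the Doubler
# actor below is hypothetical):
#     import ray
#     ray.init()
#     @ray.remote
#     class Doubler:
#         def double(self, v):
#             return 2 * v
#     pool = ActorPool([Doubler.remote(), Doubler.remote()])
#     print(list(pool.map(lambda a, v: a.double.remote(v), [1, 2, 3, 4])))  # [2, 4, 6, 8]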
|
ivan-fedorov/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/gis/db/backends/mysql/base.py
|
308
|
from django.db.backends.mysql.base import *
from django.db.backends.mysql.base import DatabaseWrapper as MySQLDatabaseWrapper
from django.contrib.gis.db.backends.mysql.creation import MySQLCreation
from django.contrib.gis.db.backends.mysql.introspection import MySQLIntrospection
from django.contrib.gis.db.backends.mysql.operations import MySQLOperations
class DatabaseWrapper(MySQLDatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.creation = MySQLCreation(self)
self.ops = MySQLOperations()
self.introspection = MySQLIntrospection(self)
|
pombredanne/milk
|
refs/heads/master
|
milk/tests/test_multi_view.py
|
2
|
import milk.supervised.multi_view
import numpy as np
import milk.supervised.svm
from milk.supervised.defaultclassifier import feature_selection_simple
def test_multi_view():
from milksets.wine import load
features, labels = load()
features0 = features[::10]
features1 = features[1::10]
features2 = features[2::10]
labels0 = labels[::10]
labels1 = labels[1::10]
labels2 = labels[2::10]
assert np.all(labels0 == labels1)
assert np.all(labels1 == labels2)
labels = labels0
train_features = list(zip(features0,features1,features2))
test_features = list(zip(features[3::10], features[4::10], features[5::10]))
base = milk.supervised.classifier.ctransforms(
feature_selection_simple(),
milk.supervised.svm.svm_raw(C=128, kernel=milk.supervised.svm.rbf_kernel(4.)),
milk.supervised.svm.svm_sigmoidal_correction()
)
classifier = milk.supervised.multi_view.multi_view_classifier([base,base,base])
model = classifier.train(train_features, labels == 0)
assert ([model.apply(f) for f in test_features] == (labels == 0)).mean() > .9
|
AnimationMentor/pikatopic
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup
setup(name='pikatopic',
version='1.0.4',
description='A convenience layer atop Pika for use with RabbitMQ topic exchanges.',
url='http://github.com/AnimationMentor/pikatopic',
author='Reed Wade for Artella.com',
author_email='reed@artella.com',
license='MIT',
packages=['pikatopic'],
install_requires=[
'pika',
],
)
|
unnikrishnankgs/va
|
refs/heads/master
|
venv/lib/python3.5/site-packages/sqlparse/exceptions.py
|
20
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
"""Exceptions used in this package."""
class SQLParseError(Exception):
"""Base class for exceptions in this module."""
|
cybernet14/scikit-learn
|
refs/heads/master
|
doc/tutorial/text_analytics/data/movie_reviews/fetch_data.py
|
278
|
"""Script to download the movie review dataset"""
import os
import tarfile
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
URL = ("http://www.cs.cornell.edu/people/pabo/"
"movie-review-data/review_polarity.tar.gz")
ARCHIVE_NAME = URL.rsplit('/', 1)[1]
DATA_FOLDER = "txt_sentoken"
if not os.path.exists(DATA_FOLDER):
if not os.path.exists(ARCHIVE_NAME):
print("Downloading dataset from %s (3 MB)" % URL)
opener = urlopen(URL)
open(ARCHIVE_NAME, 'wb').write(opener.read())
print("Decompressing %s" % ARCHIVE_NAME)
tarfile.open(ARCHIVE_NAME, "r:gz").extractall(path='.')
os.remove(ARCHIVE_NAME)
|
i/thrift
|
refs/heads/master
|
test/py/TestServer.py
|
12
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import division
import sys, glob, time, os
sys.path.insert(0, glob.glob(os.path.join(os.path.dirname(__file__),'../../lib/py/build/lib.*'))[0])
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--genpydir', type='string', dest='genpydir',
default='gen-py',
help='include this local directory in sys.path for locating generated code')
parser.add_option("--port", type="int", dest="port",
help="port number for server to listen on")
parser.add_option("--zlib", action="store_true", dest="zlib",
help="use zlib wrapper for compressed transport")
parser.add_option("--ssl", action="store_true", dest="ssl",
help="use SSL for encrypted transport")
parser.add_option('-v', '--verbose', action="store_const",
dest="verbose", const=2,
help="verbose output")
parser.add_option('-q', '--quiet', action="store_const",
dest="verbose", const=0,
help="minimal output")
parser.add_option('--protocol', dest="proto", type="string",
help="protocol to use, one of: accel, binary, compact, json")
parser.add_option('--transport', dest="trans", type="string",
help="transport to use, one of: buffered, framed")
parser.set_defaults(port=9090, verbose=1, proto='binary')
options, args = parser.parse_args()
script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
sys.path.insert(0, os.path.join(script_dir, options.genpydir))
from ThriftTest import ThriftTest
from ThriftTest.ttypes import *
from thrift.Thrift import TException
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import TZlibTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import TJSONProtocol
from thrift.server import TServer, TNonblockingServer, THttpServer
PROT_FACTORIES = {'binary': TBinaryProtocol.TBinaryProtocolFactory,
'accel': TBinaryProtocol.TBinaryProtocolAcceleratedFactory,
'compact': TCompactProtocol.TCompactProtocolFactory,
'json': TJSONProtocol.TJSONProtocolFactory}
class TestHandler:
def testVoid(self):
if options.verbose > 1:
print 'testVoid()'
def testString(self, str):
if options.verbose > 1:
print 'testString(%s)' % str
return str
def testByte(self, byte):
if options.verbose > 1:
print 'testByte(%d)' % byte
return byte
def testI16(self, i16):
if options.verbose > 1:
print 'testI16(%d)' % i16
return i16
def testI32(self, i32):
if options.verbose > 1:
print 'testI32(%d)' % i32
return i32
def testI64(self, i64):
if options.verbose > 1:
print 'testI64(%d)' % i64
return i64
def testDouble(self, dub):
if options.verbose > 1:
print 'testDouble(%f)' % dub
return dub
def testBinary(self, thing):
if options.verbose > 1:
print 'testBinary()' # TODO: hex output
    return thing
def testStruct(self, thing):
if options.verbose > 1:
print 'testStruct({%s, %d, %d, %d})' % (thing.string_thing, thing.byte_thing, thing.i32_thing, thing.i64_thing)
return thing
def testException(self, arg):
#if options.verbose > 1:
print 'testException(%s)' % arg
if arg == 'Xception':
raise Xception(errorCode=1001, message=arg)
elif arg == 'TException':
raise TException(message='This is a TException')
def testMultiException(self, arg0, arg1):
if options.verbose > 1:
print 'testMultiException(%s, %s)' % (arg0, arg1)
if arg0 == 'Xception':
raise Xception(errorCode=1001, message='This is an Xception')
elif arg0 == 'Xception2':
raise Xception2(
errorCode=2002,
struct_thing=Xtruct(string_thing='This is an Xception2'))
return Xtruct(string_thing=arg1)
def testOneway(self, seconds):
if options.verbose > 1:
print 'testOneway(%d) => sleeping...' % seconds
time.sleep(seconds / 3) # be quick
if options.verbose > 1:
print 'done sleeping'
def testNest(self, thing):
if options.verbose > 1:
print 'testNest(%s)' % thing
return thing
def testMap(self, thing):
if options.verbose > 1:
print 'testMap(%s)' % thing
return thing
def testSet(self, thing):
if options.verbose > 1:
print 'testSet(%s)' % thing
return thing
def testList(self, thing):
if options.verbose > 1:
print 'testList(%s)' % thing
return thing
def testEnum(self, thing):
if options.verbose > 1:
print 'testEnum(%s)' % thing
return thing
def testTypedef(self, thing):
if options.verbose > 1:
print 'testTypedef(%s)' % thing
return thing
def testMapMap(self, thing):
if options.verbose > 1:
print 'testMapMap(%s)' % thing
return {thing: {thing: thing}}
def testInsanity(self, argument):
if options.verbose > 1:
print 'testInsanity(%s)' % argument
return {123489: {Numberz.ONE:argument}}
def testMulti(self, arg0, arg1, arg2, arg3, arg4, arg5):
if options.verbose > 1:
print 'testMulti(%s)' % [arg0, arg1, arg2, arg3, arg4, arg5]
return Xtruct(string_thing='Hello2',
byte_thing=arg0, i32_thing=arg1, i64_thing=arg2)
# set up the protocol factory from the --protocol option
pfactory_cls = PROT_FACTORIES.get(options.proto, None)
if pfactory_cls is None:
raise AssertionError('Unknown --protocol option: %s' % options.proto)
pfactory = pfactory_cls()
# get the server type (TSimpleServer, TNonblockingServer, etc...)
if len(args) > 1:
raise AssertionError('Only one server type may be specified, not multiple types.')
server_type = args[0]
# Set up the handler and processor objects
handler = TestHandler()
processor = ThriftTest.Processor(handler)
# Handle THttpServer as a special case
if server_type == 'THttpServer':
  server = THttpServer.THttpServer(processor, ('', options.port), pfactory)
server.serve()
sys.exit(0)
# set up server transport and transport factory
rel_path = "../keys/server.pem"
abs_key_path = os.path.join(script_dir, rel_path)
host = None
if options.ssl:
from thrift.transport import TSSLSocket
transport = TSSLSocket.TSSLServerSocket(host, options.port, certfile=abs_key_path)
else:
transport = TSocket.TServerSocket(host, options.port)
tfactory = TTransport.TBufferedTransportFactory()
if options.trans == 'buffered':
  tfactory = TTransport.TBufferedTransportFactory()
elif options.trans == 'framed':
  tfactory = TTransport.TFramedTransportFactory()
elif options.trans == '':
  tfactory = TTransport.TBufferedTransportFactory()  # default to buffered when not specified
else:
  raise AssertionError('Unknown --transport option: %s' % options.trans)
# if --zlib, then wrap server transport, and use a different transport factory
if options.zlib:
transport = TZlibTransport.TZlibTransport(transport) # wrap with zlib
tfactory = TZlibTransport.TZlibTransportFactory()
# do server-specific setup here:
if server_type == "TNonblockingServer":
server = TNonblockingServer.TNonblockingServer(processor, transport, inputProtocolFactory=pfactory)
elif server_type == "TProcessPoolServer":
import signal
from thrift.server import TProcessPoolServer
server = TProcessPoolServer.TProcessPoolServer(processor, transport, tfactory, pfactory)
server.setNumWorkers(5)
def set_alarm():
def clean_shutdown(signum, frame):
for worker in server.workers:
if options.verbose > 0:
print 'Terminating worker: %s' % worker
worker.terminate()
if options.verbose > 0:
print 'Requesting server to stop()'
try:
server.stop()
except:
pass
signal.signal(signal.SIGALRM, clean_shutdown)
signal.alarm(2)
set_alarm()
else:
# look up server class dynamically to instantiate server
ServerClass = getattr(TServer, server_type)
server = ServerClass(processor, transport, tfactory, pfactory)
# enter server main loop
server.serve()
|
mecolmg/OscarTweets
|
refs/heads/master
|
files/Tweeter.py
|
1
|
#Author: Colm Gallagher
#Date: 3/11/2015
#Project: Oscar Tweets
import csv
import plotly.plotly as py
from plotly.graph_objs import *
from collections import defaultdict
#Contains Tweet data, accessed by attribute (ex: tweet.text)
class Tweet:
def __init__(self, data):
self.time, self.tweetid, self.text, self.rt, \
self.geo, self.placetag, self.favs, self.usr, \
self.usrloc, self.usrid, self.timezone, self.usrfollow, \
self.usrstats, self.usrfriends, self.usrhandle, \
self.hashtags, self.mentions = data
#Imports data as Tweet objects to the variable 'tweets'
with open('oscar_tweets.csv', 'rb') as File:
File = csv.reader(File, delimiter=',',quotechar='"')
tweets = [Tweet(data) for data in File][1:]
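#Illustrative access pattern (a sketch; field values depend on oscar_tweets.csv):
#    first = tweets[0]
#    print(first.text)
#    print(first.rt)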
#Imports a list of states from a CSV file to the variable 'states'
#For use in location() function
with open('states.csv', 'rb') as File:
File = csv.reader(File, delimiter=',',quotechar='"')
states = [data for data in File]
####Functions####
#Determines Most Tweeted Nominees
def popularity():
nominees = ["American Sniper","Birdman","Boyhood",
"The Grand Budapest Hotel","The Imitation Game",
"Selma","The Theory of Everything","Whiplash"]
count = defaultdict(int)
for tweet in tweets:
text = tweet.text.lower()
        for nominee in nominees:
            search_term = nominee.lower()
            if search_term.startswith('the '): #drop a leading "The "; strip('the ') would remove letters, not the word
                search_term = search_term[4:]
            if text.count(search_term) != 0:
                count[nominee] += 1
top = sorted(count.items(),key=lambda x:x[1], reverse=True)
#Prints out results
count = 1
print("The Most Tweeted About Best Picture Nominees:")
for t in top:
print("\t"+str(count)+": "+t[0])
count += 1
#Graphs results
data = Data([Bar(x=[data[0] for data in top],
y=[data[1] for data in top],
marker=Marker(color='#b09953'))])
layout = Layout(
title='Tweets about Best Picture Nominees',
font=Font(
family='"Open sans", verdana, arial, sans-serif',
size=17,
color='#000'
),
yaxis=YAxis(title='Number of Tweets')
)
fig = Figure(data=data,layout=layout)
plot = py.plot(fig)
#Determines when Birdman (the winner) was most tweeted about
def winner():
count = defaultdict(int)
for tweet in tweets:
hour = int(tweet.time[11:13])
minute = int(tweet.time[14:16])
text = tweet.text.lower()
if text.count('birdman') != 0:
count[(hour,minute)] += 1
times = sorted(count.items(),key=lambda x:x[1], reverse=True)
#Prints results
print("Birdman was mentioned most frequently at:")
print("\t {:02d}:{:02d} GMT".format((times[0][0][0]-1)%12 +1, times[0][0][1]))
#Graphs results
x=[data[0][0] for data in times for i in range(data[1])]
y=[data[0][1] for data in times for i in range(data[1])]
data = Data([
Histogram2d(
x=x,
y=y,
autobinx=False,
xbins=XBins(
start=0.5,
end=6.5,
size=1
),
autobiny=False,
ybins=YBins(
start=0.5,
end=60.5,
size=1
),
colorscale=[[0, 'rgb(12,51,131)'], [0.25, 'rgb(10,136,186)'], [0.5, 'rgb(242,211,56)'], [0.75, 'rgb(242,143,56)'], [1, 'rgb(217,30,30)']]
)
])
layout = Layout(
title='Times where Birdman is Mentioned<br> (GMT)',
font=Font(
family='"Open sans", verdana, arial, sans-serif',
size=17,
color='#000'
),
yaxis=YAxis(title='Minute'),
xaxis=XAxis(title='Hour')
)
fig = Figure(data=data,layout=layout)
plot = py.plot(fig)
#Determines the top tweeting states in the US
def location():
count = defaultdict(int)
for tweet in tweets:
loc = tweet.usrloc
if len(loc) != 0:
for state in states:
if loc.count(state[0]) != 0 or loc.count(state[1]) != 0:
count[state[0]] += 1
times = sorted(count.items(),key=lambda x:x[1], reverse=True)
#Prints results
print("The top 10 tweeting US states were:")
for i in range(10):
print("\t" + str(i+1)+": "+times[i][0])
#Graphs results
x = [state[0] for state in times[:10]]
y = [state[1] for state in times[:10]]
text = [state[0] for state in times[:10]]
data = Data([Bar(x=x,y=y,text=text,marker=Marker(color='#b09953'))])
layout = Layout(
title='Top Tweeting States',
font=Font(
family='"Open sans", verdana, arial, sans-serif',
size=17,
color='#000'
),
yaxis=YAxis(title='Number of Tweets Sent')
)
fig = Figure(data=data,layout=layout)
plot = py.plot(fig, filename='Top Tweeting States')
#### Additional Functions ####
#Returns information on the most retweeted Tweet of the night
def topRT():
toprt = 0
topTweet = tweets[0]
for tweet in tweets:
trt = int(tweet.rt)
if trt > toprt:
toprt = trt
topTweet = tweet
#Prints results
print("The top tweet was:")
print("\n{:s}".format(topTweet.text))
print("\nWith {:s} retweets".format(topTweet.rt))
print("URL: http://twitter.com/{:s}/status/{:s}".format(topTweet.usrhandle, topTweet.tweetid))
|
tardyp/buildbot
|
refs/heads/master
|
master/buildbot/test/integration/test_log_finish.py
|
6
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from buildbot.plugins import steps
from buildbot.process.results import EXCEPTION
from buildbot.process.results import SUCCESS
from buildbot.test.util.integration import RunMasterBase
class TestLog(RunMasterBase):
# master configuration
def masterConfig(self, step):
c = {}
from buildbot.config import BuilderConfig
from buildbot.process.factory import BuildFactory
from buildbot.plugins import schedulers
c['schedulers'] = [
schedulers.AnyBranchScheduler(
name="sched",
builderNames=["testy"])]
f = BuildFactory()
f.addStep(step)
c['builders'] = [
BuilderConfig(name="testy",
workernames=["local1"],
factory=f)]
return c
@defer.inlineCallbacks
def test_shellcommand(self):
class MyStep(steps.ShellCommand):
def _newLog(obj, name, type, logid, logEncoding):
r = steps.ShellCommand._newLog(obj, name, type, logid, logEncoding)
self.curr_log = r
return self.curr_log
step = MyStep(command='echo hello')
yield self.setupConfig(self.masterConfig(step))
change = dict(branch="master",
files=["foo.c"],
author="me@foo.com",
committer="me@foo.com",
comments="good stuff",
revision="HEAD",
project="none")
build = yield self.doForceBuild(wantSteps=True, useChange=change, wantLogs=True)
self.assertEqual(build['buildid'], 1)
self.assertEqual(build['results'], SUCCESS)
self.assertTrue(self.curr_log.finished)
@defer.inlineCallbacks
def test_mastershellcommand(self):
class MyStep(steps.MasterShellCommand):
def _newLog(obj, name, type, logid, logEncoding):
r = steps.MasterShellCommand._newLog(obj, name, type, logid, logEncoding)
self.curr_log = r
return self.curr_log
step = MyStep(command='echo hello')
yield self.setupConfig(self.masterConfig(step))
change = dict(branch="master",
files=["foo.c"],
author="me@foo.com",
committer="me@foo.com",
comments="good stuff",
revision="HEAD",
project="none")
build = yield self.doForceBuild(wantSteps=True, useChange=change, wantLogs=True)
self.assertEqual(build['buildid'], 1)
self.assertEqual(build['results'], SUCCESS)
self.assertTrue(self.curr_log.finished)
@defer.inlineCallbacks
def test_mastershellcommand_issue(self):
class MyStep(steps.MasterShellCommand):
def _newLog(obj, name, type, logid, logEncoding):
r = steps.MasterShellCommand._newLog(obj, name, type, logid, logEncoding)
self.curr_log = r
self.patch(r, "finish", lambda: defer.fail(RuntimeError('Could not finish')))
return self.curr_log
step = MyStep(command='echo hello')
yield self.setupConfig(self.masterConfig(step))
change = dict(branch="master",
files=["foo.c"],
author="me@foo.com",
committer="me@foo.com",
comments="good stuff",
revision="HEAD",
project="none")
build = yield self.doForceBuild(wantSteps=True, useChange=change, wantLogs=True)
self.assertEqual(build['buildid'], 1)
self.assertFalse(self.curr_log.finished)
self.assertEqual(build['results'], EXCEPTION)
errors = self.flushLoggedErrors()
self.assertEqual(len(errors), 1)
error = errors[0]
self.assertEqual(error.getErrorMessage(), 'Could not finish')
|
urwithajit9/androguard
|
refs/heads/master
|
androguard/core/api_specific_resources/api_permission_mappings/api_permission_mappings_api10.py
|
37
| null |
motion2015/edx-platform
|
refs/heads/master
|
common/test/acceptance/tests/lms/test_lms_course_discovery.py
|
69
|
"""
Test course discovery.
"""
import datetime
import json
from bok_choy.web_app_test import WebAppTest
from ..helpers import remove_file
from ...pages.common.logout import LogoutPage
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.lms.discovery import CourseDiscoveryPage
from ...fixtures.course import CourseFixture
class CourseDiscoveryTest(WebAppTest):
"""
Test searching for courses.
"""
STAFF_USERNAME = "STAFF_TESTER"
STAFF_EMAIL = "staff101@example.com"
TEST_INDEX_FILENAME = "test_root/index_file.dat"
def setUp(self):
"""
Create course page and courses to find
"""
# create index file
with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
json.dump({}, index_file)
self.addCleanup(remove_file, self.TEST_INDEX_FILENAME)
super(CourseDiscoveryTest, self).setUp()
self.page = CourseDiscoveryPage(self.browser)
for i in range(10):
org = self.unique_id
number = unicode(i)
run = "test_run"
name = "test course"
settings = {'enrollment_start': datetime.datetime(1970, 1, 1).isoformat()}
CourseFixture(org, number, run, name, settings=settings).install()
for i in range(2):
org = self.unique_id
number = unicode(i)
run = "test_run"
name = "grass is always greener"
CourseFixture(
org,
number,
run,
name,
settings={
'enrollment_start': datetime.datetime(1970, 1, 1).isoformat()
}
).install()
def _auto_auth(self, username, email, staff):
"""
Logout and login with given credentials.
"""
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, username=username, email=email, staff=staff).visit()
def test_page_existence(self):
"""
Make sure that the page is accessible.
"""
self.page.visit()
def test_search(self):
"""
Make sure you can search for courses.
"""
self.page.visit()
self.assertEqual(len(self.page.result_items), 12)
self.page.search("grass")
self.assertEqual(len(self.page.result_items), 2)
self.page.clear_search()
self.assertEqual(len(self.page.result_items), 12)
|
ibressler/pyqtgraph
|
refs/heads/master
|
examples/GLScatterPlotItem.py
|
28
|
# -*- coding: utf-8 -*-
"""
Demonstrates use of GLScatterPlotItem with rapidly-updating plots.
"""
## Add path to library (just for examples; you do not need this)
import initExample
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import numpy as np
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 20
w.show()
w.setWindowTitle('pyqtgraph example: GLScatterPlotItem')
g = gl.GLGridItem()
w.addItem(g)
##
## First example is a set of points with pxMode=False
## These demonstrate the ability to have points with real size down to a very small scale
##
pos = np.empty((53, 3))
size = np.empty((53))
color = np.empty((53, 4))
pos[0] = (1,0,0); size[0] = 0.5; color[0] = (1.0, 0.0, 0.0, 0.5)
pos[1] = (0,1,0); size[1] = 0.2; color[1] = (0.0, 0.0, 1.0, 0.5)
pos[2] = (0,0,1); size[2] = 2./3.; color[2] = (0.0, 1.0, 0.0, 0.5)
z = 0.5
d = 6.0
for i in range(3,53):
pos[i] = (0,0,z)
size[i] = 2./d
color[i] = (0.0, 1.0, 0.0, 0.5)
z *= 0.5
d *= 2.0
sp1 = gl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=False)
sp1.translate(5,5,0)
w.addItem(sp1)
##
## Second example shows a volume of points with rapidly updating color
## and pxMode=True
##
pos = np.random.random(size=(100000,3))
pos *= [10,-10,10]
pos[0] = (0,0,0)
color = np.ones((pos.shape[0], 4))
d2 = (pos**2).sum(axis=1)**0.5
size = np.random.random(size=pos.shape[0])*10
sp2 = gl.GLScatterPlotItem(pos=pos, color=(1,1,1,1), size=size)
phase = 0.
w.addItem(sp2)
##
## Third example shows a grid of points with rapidly updating position
## and pxMode = False
##
pos3 = np.zeros((100,100,3))
pos3[:,:,:2] = np.mgrid[:100, :100].transpose(1,2,0) * [-0.1,0.1]
pos3 = pos3.reshape(10000,3)
d3 = (pos3**2).sum(axis=1)**0.5
sp3 = gl.GLScatterPlotItem(pos=pos3, color=(1,1,1,.3), size=0.1, pxMode=False)
w.addItem(sp3)
def update():
## update volume colors
global phase, sp2, d2
s = -np.cos(d2*2+phase)
color = np.empty((len(d2),4), dtype=np.float32)
color[:,3] = np.clip(s * 0.1, 0, 1)
color[:,0] = np.clip(s * 3.0, 0, 1)
color[:,1] = np.clip(s * 1.0, 0, 1)
color[:,2] = np.clip(s ** 3, 0, 1)
sp2.setData(color=color)
phase -= 0.1
## update surface positions and colors
global sp3, d3, pos3
z = -np.cos(d3*2+phase)
pos3[:,2] = z
color = np.empty((len(d3),4), dtype=np.float32)
color[:,3] = 0.3
color[:,0] = np.clip(z * 3.0, 0, 1)
color[:,1] = np.clip(z * 1.0, 0, 1)
color[:,2] = np.clip(z ** 3, 0, 1)
sp3.setData(pos=pos3, color=color)
t = QtCore.QTimer()
t.timeout.connect(update)
t.start(50)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
adhish20/TwitterWithCassandra
|
refs/heads/master
|
twiss/lib/python2.7/site-packages/pip/_vendor/html5lib/trie/py.py
|
1323
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
        while i < len(self._keys) and self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
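# Usage sketch (illustrative):
#     t = Trie({u"foo": 1, u"food": 2, u"bar": 3})
#     t.keys(u"fo")                  # {u"foo", u"food"}
#     t.has_keys_with_prefix(u"ba")  # True
#     t[u"bar"]                      # 3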
|
opensourcechipspark/platform_external_chromium_org
|
refs/heads/master
|
tools/telemetry/telemetry/page/page_test_unittest.py
|
33
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.page import page as page_module
from telemetry.page import page_test
from telemetry.page.actions import all_page_actions
from telemetry.page.actions import page_action
def _CreatePage(test_filename):
url = 'file:///' + os.path.join('..', '..', 'unittest_data', test_filename)
base_dir = os.path.dirname(__file__)
page = page_module.Page(url, None, base_dir=base_dir)
return page
class DoNothingPageTest(page_test.PageTest):
def __init__(self, action_name_to_run=''):
super(DoNothingPageTest, self).__init__('DoNothing', action_name_to_run)
def DoNothing(self, page, tab, results):
pass
class AppendAction(page_action.PageAction):
def RunAction(self, page, tab, previous_action):
self.var.append(True)
class WrapAppendAction(page_action.PageAction):
def RunsPreviousAction(self):
return True
def RunAction(self, page, tab, previous_action):
self.var.append('before')
previous_action.WillRunAction(page, tab)
previous_action.RunAction(page, tab, None)
self.var.append('after')
class PageTestUnitTest(unittest.TestCase):
def setUp(self):
super(PageTestUnitTest, self).setUp()
all_page_actions.RegisterClassForTest('append', AppendAction)
all_page_actions.RegisterClassForTest('wrap_append', WrapAppendAction)
self._page_test = DoNothingPageTest('action_to_run')
self._page = _CreatePage('blank.html')
def testRunActions(self):
action_called = []
action_to_run = [
{ 'action': 'append', 'var': action_called }
]
setattr(self._page, 'action_to_run', action_to_run)
self._page_test.Run(None, self._page, None, None)
self.assertTrue(action_called)
def testPreviousAction(self):
action_list = []
action_to_run = [
{ 'action': 'append', 'var': action_list },
{ 'action': 'wrap_append', 'var': action_list }
]
setattr(self._page, 'action_to_run', action_to_run)
self._page_test.Run(None, self._page, None, None)
self.assertEqual(action_list, ['before', True, 'after'])
def testReferenceAction(self):
action_list = []
action_to_run = [
{ 'action': 'referenced_action_1' },
{ 'action': 'referenced_action_2' }
]
referenced_action_1 = { 'action': 'append', 'var': action_list }
referenced_action_2 = { 'action': 'wrap_append', 'var': action_list }
setattr(self._page, 'action_to_run', action_to_run)
setattr(self._page, 'referenced_action_1', referenced_action_1)
setattr(self._page, 'referenced_action_2', referenced_action_2)
self._page_test.Run(None, self._page, None, None)
self.assertEqual(action_list, ['before', True, 'after'])
def testRepeatAction(self):
action_list = []
action_to_run = { 'action': 'append', 'var': action_list, 'repeat': 10 }
setattr(self._page, 'action_to_run', action_to_run)
self._page_test.Run(None, self._page, None, None)
self.assertEqual(len(action_list), 10)
def testRepeatReferenceAction(self):
action_list = []
action_to_run = { 'action': 'referenced_action', 'repeat': 2 }
referenced_action = [
{ 'action': 'append', 'var': action_list },
{ 'action': 'wrap_append', 'var': action_list }
]
setattr(self._page, 'action_to_run', action_to_run)
setattr(self._page, 'referenced_action', referenced_action)
self._page_test.Run(None, self._page, None, None)
self.assertEqual(action_list,
['before', True, 'after', 'before', True, 'after'])
def testRepeatPreviousActionFails(self):
action_list = []
action_to_run = { 'action': 'wrap_append', 'var': action_list, 'repeat': 2 }
setattr(self._page, 'action_to_run', action_to_run)
self.assertRaises(page_action.PageActionFailed,
lambda: self._page_test.Run(None, self._page, None, None))
|
cmoutard/mne-python
|
refs/heads/master
|
examples/time_frequency/plot_source_power_spectrum.py
|
19
|
"""
=========================================================
Compute power spectrum densities of the sources with dSPM
=========================================================
Returns an STC file containing the PSD (in dB) of each of the sources.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
# Setup for reading the raw data
raw = io.Raw(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
label = mne.read_label(fname_label)
stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
pick_ori="normal", n_fft=n_fft, label=label)
stc.save('psd_dSPM')
###############################################################################
# View PSD of sources in label
plt.plot(1e3 * stc.times, stc.data.T)
plt.xlabel('Frequency (Hz)')
plt.ylabel('PSD (dB)')
plt.title('Source Power Spectrum (PSD)')
plt.show()
|
ppwwyyxx/tensorflow
|
refs/heads/master
|
tensorflow/python/keras/optimizer_v2/adagrad.py
|
4
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.Adagrad')
class Adagrad(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the Adagrad algorithm.
Adagrad is an optimizer with parameter-specific learning rates,
which are adapted relative to how frequently a parameter gets
updated during training. The more updates a parameter receives,
the smaller the updates.
Initialization:
$$accum_{g_0} := \text{initial_accumulator_value}$$
Update step:
$$t := t + 1$$
$$accum_{g_t} := accum_{g_{t-1}} + g^2$$
$$\theta_t := \theta_{t-1} - lr * g / (\sqrt{accum_{g_t}} + \epsilon)$$
References:
* [Paper](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
* [Introduction]
(https://ppasupat.github.io/a9online/uploads/proximal_notes.pdf).
"""
def __init__(self,
learning_rate=0.001,
initial_accumulator_value=0.1,
epsilon=1e-7,
name='Adagrad',
**kwargs):
"""Construct a new Adagrad optimizer.
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate.
initial_accumulator_value: A floating point value.
Starting value for the accumulators, must be non-negative.
epsilon: A small floating point value to avoid zero denominator.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adagrad".
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
Raises:
ValueError: If the `initial_accumulator_value` or `epsilon` is invalid.
@compatibility(eager)
When eager execution is enabled, `learning_rate` can be a callable that
takes no arguments and returns the actual value to use. This can be useful
for changing these values across different invocations of optimizer
functions.
@end_compatibility
"""
if initial_accumulator_value < 0.0:
raise ValueError('initial_accumulator_value must be non-negative: %s' %
initial_accumulator_value)
if epsilon is None:
epsilon = backend_config.epsilon()
super(Adagrad, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._initial_accumulator_value = initial_accumulator_value
self.epsilon = epsilon or backend_config.epsilon()
def _create_slots(self, var_list):
for var in var_list:
dtype = var.dtype.base_dtype
init = init_ops.constant_initializer(
self._initial_accumulator_value, dtype=dtype)
self.add_slot(var, 'accumulator', init)
def _prepare_local(self, var_device, var_dtype, apply_state):
super(Adagrad, self)._prepare_local(var_device, var_dtype, apply_state)
apply_state[(var_device, var_dtype)].update(dict(
epsilon=ops.convert_to_tensor(self.epsilon, var_dtype),
neg_lr_t=-apply_state[(var_device, var_dtype)]['lr_t'],
zero=array_ops.zeros((), dtype=dtypes.int64)
))
def set_weights(self, weights):
params = self.weights
# Override set_weights for backward compatibility of Keras V1 optimizer
# since it does not include iteration at head of the weight list. Set
# iteration to 0.
if len(params) == len(weights) + 1:
weights = [np.array(0)] + weights
super(Adagrad, self).set_weights(weights)
@classmethod
def from_config(cls, config, custom_objects=None):
"""Creates an optimizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same optimizer from the config
dictionary.
Arguments:
config: A Python dictionary, typically the output of get_config.
custom_objects: A Python dictionary mapping names to additional Python
objects used to create this optimizer, such as a function used for a
hyperparameter.
Returns:
An optimizer instance.
"""
if 'initial_accumulator_value' not in config:
config['initial_accumulator_value'] = 0.
if 'lr' in config:
config['learning_rate'] = config.pop('lr')
return cls(**config)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
acc = self.get_slot(var, 'accumulator')
return training_ops.resource_apply_adagrad_v2(
var.handle,
acc.handle,
coefficients['lr_t'],
coefficients['epsilon'],
grad,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
acc = self.get_slot(var, 'accumulator')
return training_ops.resource_sparse_apply_adagrad_v2(
var.handle,
acc.handle,
coefficients['lr_t'],
coefficients['epsilon'],
grad,
indices,
use_locking=self._use_locking)
def get_config(self):
config = super(Adagrad, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._serialize_hyperparameter('decay'),
'initial_accumulator_value': self._initial_accumulator_value,
'epsilon': self.epsilon,
})
return config
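# Minimal usage sketch (assumes a compiled Keras model is available; values are illustrative):
#     opt = Adagrad(learning_rate=0.01, initial_accumulator_value=0.1, epsilon=1e-7)
#     model.compile(optimizer=opt, loss='mse')
#     opt_restored = Adagrad.from_config(opt.get_config())  # config round-trip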
|
alhashash/odoo
|
refs/heads/master
|
addons/hw_escpos/escpos/constants.py
|
65
|
# -*- coding: utf-8 -*-
""" ESC/POS Commands (Constants) """
# Feed control sequences
CTL_LF = '\x0a' # Print and line feed
CTL_FF = '\x0c' # Form feed
CTL_CR = '\x0d' # Carriage return
CTL_HT = '\x09' # Horizontal tab
CTL_VT = '\x0b' # Vertical tab
# RT Status commands
DLE_EOT_PRINTER = '\x10\x04\x01' # Transmit printer status
DLE_EOT_OFFLINE = '\x10\x04\x02'
DLE_EOT_ERROR = '\x10\x04\x03'
DLE_EOT_PAPER = '\x10\x04\x04'
# Printer hardware
HW_INIT = '\x1b\x40' # Clear data in buffer and reset modes
HW_SELECT = '\x1b\x3d\x01' # Printer select
HW_RESET = '\x1b\x3f\x0a\x00' # Reset printer hardware
# Cash Drawer
CD_KICK_2 = '\x1b\x70\x00' # Sends a pulse to pin 2 []
CD_KICK_5 = '\x1b\x70\x01' # Sends a pulse to pin 5 []
# Paper
PAPER_FULL_CUT = '\x1d\x56\x00' # Full cut paper
PAPER_PART_CUT = '\x1d\x56\x01' # Partial cut paper
# Text format
TXT_NORMAL = '\x1b\x21\x00' # Normal text
TXT_2HEIGHT = '\x1b\x21\x10' # Double height text
TXT_2WIDTH = '\x1b\x21\x20' # Double width text
TXT_DOUBLE = '\x1b\x21\x30' # Double height & Width
TXT_UNDERL_OFF = '\x1b\x2d\x00' # Underline font OFF
TXT_UNDERL_ON = '\x1b\x2d\x01' # Underline font 1-dot ON
TXT_UNDERL2_ON = '\x1b\x2d\x02' # Underline font 2-dot ON
TXT_BOLD_OFF = '\x1b\x45\x00' # Bold font OFF
TXT_BOLD_ON = '\x1b\x45\x01' # Bold font ON
TXT_FONT_A = '\x1b\x4d\x00' # Font type A
TXT_FONT_B = '\x1b\x4d\x01' # Font type B
TXT_ALIGN_LT = '\x1b\x61\x00' # Left justification
TXT_ALIGN_CT = '\x1b\x61\x01' # Centering
TXT_ALIGN_RT = '\x1b\x61\x02' # Right justification
TXT_COLOR_BLACK = '\x1b\x72\x00' # Default Color
TXT_COLOR_RED = '\x1b\x72\x01' # Alternative Color ( Usually Red )
# Text Encoding
TXT_ENC_PC437 = '\x1b\x74\x00' # PC437 USA
TXT_ENC_KATAKANA= '\x1b\x74\x01' # KATAKANA (JAPAN)
TXT_ENC_PC850 = '\x1b\x74\x02' # PC850 Multilingual
TXT_ENC_PC860 = '\x1b\x74\x03' # PC860 Portuguese
TXT_ENC_PC863 = '\x1b\x74\x04' # PC863 Canadian-French
TXT_ENC_PC865 = '\x1b\x74\x05' # PC865 Nordic
TXT_ENC_KANJI6 = '\x1b\x74\x06' # One-pass Kanji, Hiragana
TXT_ENC_KANJI7 = '\x1b\x74\x07' # One-pass Kanji
TXT_ENC_KANJI8 = '\x1b\x74\x08' # One-pass Kanji
TXT_ENC_PC851 = '\x1b\x74\x0b' # PC851 Greek
TXT_ENC_PC853 = '\x1b\x74\x0c' # PC853 Turkish
TXT_ENC_PC857 = '\x1b\x74\x0d' # PC857 Turkish
TXT_ENC_PC737 = '\x1b\x74\x0e' # PC737 Greek
TXT_ENC_8859_7 = '\x1b\x74\x0f' # ISO8859-7 Greek
TXT_ENC_WPC1252 = '\x1b\x74\x10' # WPC1252
TXT_ENC_PC866 = '\x1b\x74\x11' # PC866 Cyrillic #2
TXT_ENC_PC852 = '\x1b\x74\x12' # PC852 Latin2
TXT_ENC_PC858 = '\x1b\x74\x13' # PC858 Euro
TXT_ENC_KU42 = '\x1b\x74\x14' # KU42 Thai
TXT_ENC_TIS11 = '\x1b\x74\x15' # TIS11 Thai
TXT_ENC_TIS18 = '\x1b\x74\x1a' # TIS18 Thai
TXT_ENC_TCVN3 = '\x1b\x74\x1e' # TCVN3 Vietnamese
TXT_ENC_TCVN3B = '\x1b\x74\x1f' # TCVN3 Vietnamese
TXT_ENC_PC720 = '\x1b\x74\x20' # PC720 Arabic
TXT_ENC_WPC775 = '\x1b\x74\x21' # WPC775 Baltic Rim
TXT_ENC_PC855 = '\x1b\x74\x22' # PC855 Cyrillic
TXT_ENC_PC861 = '\x1b\x74\x23' # PC861 Icelandic
TXT_ENC_PC862 = '\x1b\x74\x24' # PC862 Hebrew
TXT_ENC_PC864 = '\x1b\x74\x25' # PC864 Arabic
TXT_ENC_PC869 = '\x1b\x74\x26' # PC869 Greek
TXT_ENC_8859_2 = '\x1b\x74\x27' # ISO8859-2 Latin2
TXT_ENC_8859_9 = '\x1b\x74\x28' # ISO8859-9
TXT_ENC_PC1098 = '\x1b\x74\x29' # PC1098 Farsi
TXT_ENC_PC1118 = '\x1b\x74\x2a' # PC1118 Lithuanian
TXT_ENC_PC1119 = '\x1b\x74\x2b' # PC1119 Lithuanian
TXT_ENC_PC1125 = '\x1b\x74\x2c' # PC1125 Ukrainian
TXT_ENC_WPC1250 = '\x1b\x74\x2d' # WPC1250 Latin2
TXT_ENC_WPC1251 = '\x1b\x74\x2e' # WPC1251 Cyrillic
TXT_ENC_WPC1253 = '\x1b\x74\x2f' # WPC1253 Greek
TXT_ENC_WPC1254 = '\x1b\x74\x30' # WPC1254 Turkish
TXT_ENC_WPC1255 = '\x1b\x74\x31' # WPC1255 Hebrew
TXT_ENC_WPC1256 = '\x1b\x74\x32' # WPC1256 Arabic
TXT_ENC_WPC1257 = '\x1b\x74\x33' # WPC1257 Baltic Rim
TXT_ENC_WPC1258 = '\x1b\x74\x34' # WPC1258 Vietnamese
TXT_ENC_KZ1048 = '\x1b\x74\x35' # KZ-1048 Kazakhstan
TXT_ENC_KATAKANA_MAP = {
# Maps UTF-8 Katakana symbols to KATAKANA Page Codes
# Half-Width Katakanas
'\xef\xbd\xa1':'\xa1', # 。
'\xef\xbd\xa2':'\xa2', # 「
'\xef\xbd\xa3':'\xa3', # 」
'\xef\xbd\xa4':'\xa4', # 、
'\xef\xbd\xa5':'\xa5', # ・
'\xef\xbd\xa6':'\xa6', # ヲ
'\xef\xbd\xa7':'\xa7', # ァ
'\xef\xbd\xa8':'\xa8', # ィ
'\xef\xbd\xa9':'\xa9', # ゥ
'\xef\xbd\xaa':'\xaa', # ェ
'\xef\xbd\xab':'\xab', # ォ
'\xef\xbd\xac':'\xac', # ャ
'\xef\xbd\xad':'\xad', # ュ
'\xef\xbd\xae':'\xae', # ョ
'\xef\xbd\xaf':'\xaf', # ッ
'\xef\xbd\xb0':'\xb0', # ー
'\xef\xbd\xb1':'\xb1', # ア
'\xef\xbd\xb2':'\xb2', # イ
'\xef\xbd\xb3':'\xb3', # ウ
'\xef\xbd\xb4':'\xb4', # エ
'\xef\xbd\xb5':'\xb5', # オ
'\xef\xbd\xb6':'\xb6', # カ
'\xef\xbd\xb7':'\xb7', # キ
'\xef\xbd\xb8':'\xb8', # ク
'\xef\xbd\xb9':'\xb9', # ケ
'\xef\xbd\xba':'\xba', # コ
'\xef\xbd\xbb':'\xbb', # サ
'\xef\xbd\xbc':'\xbc', # シ
'\xef\xbd\xbd':'\xbd', # ス
'\xef\xbd\xbe':'\xbe', # セ
'\xef\xbd\xbf':'\xbf', # ソ
'\xef\xbe\x80':'\xc0', # タ
'\xef\xbe\x81':'\xc1', # チ
'\xef\xbe\x82':'\xc2', # ツ
'\xef\xbe\x83':'\xc3', # テ
'\xef\xbe\x84':'\xc4', # ト
'\xef\xbe\x85':'\xc5', # ナ
'\xef\xbe\x86':'\xc6', # ニ
'\xef\xbe\x87':'\xc7', # ヌ
'\xef\xbe\x88':'\xc8', # ネ
'\xef\xbe\x89':'\xc9', # ノ
'\xef\xbe\x8a':'\xca', # ハ
'\xef\xbe\x8b':'\xcb', # ヒ
'\xef\xbe\x8c':'\xcc', # フ
'\xef\xbe\x8d':'\xcd', # ヘ
'\xef\xbe\x8e':'\xce', # ホ
'\xef\xbe\x8f':'\xcf', # マ
'\xef\xbe\x90':'\xd0', # ミ
'\xef\xbe\x91':'\xd1', # ム
'\xef\xbe\x92':'\xd2', # メ
'\xef\xbe\x93':'\xd3', # モ
'\xef\xbe\x94':'\xd4', # ヤ
'\xef\xbe\x95':'\xd5', # ユ
'\xef\xbe\x96':'\xd6', # ヨ
'\xef\xbe\x97':'\xd7', # ラ
'\xef\xbe\x98':'\xd8', # リ
'\xef\xbe\x99':'\xd9', # ル
'\xef\xbe\x9a':'\xda', # レ
'\xef\xbe\x9b':'\xdb', # ロ
'\xef\xbe\x9c':'\xdc', # ワ
'\xef\xbe\x9d':'\xdd', # ン
'\xef\xbe\x9e':'\xde', # ゙
'\xef\xbe\x9f':'\xdf', # ゚
}
# Barcode format
BARCODE_TXT_OFF = '\x1d\x48\x00' # HRI barcode chars OFF
BARCODE_TXT_ABV = '\x1d\x48\x01' # HRI barcode chars above
BARCODE_TXT_BLW = '\x1d\x48\x02' # HRI barcode chars below
BARCODE_TXT_BTH = '\x1d\x48\x03' # HRI barcode chars both above and below
BARCODE_FONT_A = '\x1d\x66\x00' # Font type A for HRI barcode chars
BARCODE_FONT_B = '\x1d\x66\x01' # Font type B for HRI barcode chars
BARCODE_HEIGHT = '\x1d\x68\x64' # Barcode Height [1-255]
BARCODE_WIDTH = '\x1d\x77\x03' # Barcode Width [2-6]
BARCODE_UPC_A = '\x1d\x6b\x00' # Barcode type UPC-A
BARCODE_UPC_E = '\x1d\x6b\x01' # Barcode type UPC-E
BARCODE_EAN13 = '\x1d\x6b\x02' # Barcode type EAN13
BARCODE_EAN8 = '\x1d\x6b\x03' # Barcode type EAN8
BARCODE_CODE39 = '\x1d\x6b\x04' # Barcode type CODE39
BARCODE_ITF = '\x1d\x6b\x05' # Barcode type ITF
BARCODE_NW7 = '\x1d\x6b\x06' # Barcode type NW7
# Image format
S_RASTER_N = '\x1d\x76\x30\x00' # Set raster image normal size
S_RASTER_2W = '\x1d\x76\x30\x01' # Set raster image double width
S_RASTER_2H = '\x1d\x76\x30\x02' # Set raster image double height
S_RASTER_Q = '\x1d\x76\x30\x03' # Set raster image quadruple
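# Illustrative composition of a raw ESC/POS job from these constants (a sketch;
# sending the resulting byte string to the printer is device-specific):
#     job = HW_INIT + TXT_ALIGN_CT + TXT_BOLD_ON + 'RECEIPT\n' + TXT_BOLD_OFF + TXT_ALIGN_LT + PAPER_PART_CUT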
|
InfinitiveOS/external_skia
|
refs/heads/io-1.0
|
gm/rebaseline_server/download_actuals_test.py
|
66
|
#!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Test download_actuals.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
1. examine the results in self._output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
3. mv self._output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
"""
# System-level imports
import os
import shutil
import tempfile
import urllib
# Imports from within Skia
import fix_pythonpath # must do this first
from pyutils import url_utils
import base_unittest
import download_actuals
class DownloadTest(base_unittest.TestCase):
def test_fetch(self):
"""Tests fetch() of GM results from actual-results.json ."""
downloader = download_actuals.Download(
actuals_base_url=url_utils.create_filepath_url(
os.path.join(self._input_dir, 'gm-actuals')),
gm_actuals_root_url=url_utils.create_filepath_url(
os.path.join(self._input_dir, 'fake-gm-imagefiles')))
downloader.fetch(
builder_name='Test-Android-GalaxyNexus-SGX540-Arm7-Release',
dest_dir=self._output_dir_actual)
def main():
base_unittest.main(DownloadTest)
if __name__ == '__main__':
main()
|
GovReady/readthedocs.org
|
refs/heads/master
|
readthedocs/vcs_support/backends/hg.py
|
34
|
from readthedocs.projects.exceptions import ProjectImportError
from readthedocs.vcs_support.base import BaseVCS, VCSVersion
class Backend(BaseVCS):
supports_tags = True
supports_branches = True
fallback_branch = 'default'
def update(self):
super(Backend, self).update()
retcode = self.run('hg', 'status')[0]
if retcode == 0:
return self.pull()
else:
return self.clone()
def pull(self):
pull_output = self.run('hg', 'pull')
if pull_output[0] != 0:
raise ProjectImportError(
("Failed to get code from '%s' (hg pull): %s"
% (self.repo_url, pull_output[0]))
)
update_output = self.run('hg', 'update', '-C')
if update_output[0] != 0:
raise ProjectImportError(
("Failed to get code from '%s' (hg update): %s"
% (self.repo_url, update_output[0]))
)
return update_output
def clone(self):
self.make_clean_working_dir()
output = self.run('hg', 'clone', self.repo_url, '.')
if output[0] != 0:
raise ProjectImportError(
("Failed to get code from '%s' (hg clone): %s"
% (self.repo_url, output[0]))
)
return output
@property
def branches(self):
retcode, stdout = self.run('hg', 'branches', '-q')[:2]
# error (or no branches found)
if retcode != 0:
return []
return self.parse_branches(stdout)
def parse_branches(self, data):
"""
stable
default
"""
names = [name.lstrip() for name in data.splitlines()]
return [VCSVersion(self, name, name) for name in names if name]
@property
def tags(self):
retcode, stdout = self.run('hg', 'tags')[:2]
# error (or no tags found)
if retcode != 0:
return []
return self.parse_tags(stdout)
def parse_tags(self, data):
"""
Parses output of `hg tags`, eg:
tip 278:c4b2d21db51a
0.2.2 152:6b0364d98837
0.2.1 117:a14b7b6ffa03
0.1 50:30c2c6b3a055
maintenance release 1 10:f83c32fe8126
Into VCSVersion objects with the tag name as verbose_name and the
commit hash as identifier.
"""
vcs_tags = []
tag_lines = [line.strip() for line in data.splitlines()]
# starting from the rhs of each line, split a single value (changeset)
# off at whitespace; the tag name is the string to the left of that
tag_pairs = [line.rsplit(None, 1) for line in tag_lines]
for row in tag_pairs:
if len(row) != 2:
continue
name, commit = row
if name == 'tip':
continue
revision, commit_hash = commit.split(':')
vcs_tags.append(VCSVersion(self, commit_hash, name))
return vcs_tags
@property
def commit(self):
retcode, stdout = self.run('hg', 'id', '-i')[:2]
return stdout.strip()
def checkout(self, identifier=None):
super(Backend, self).checkout()
if not identifier:
identifier = 'tip'
retcode = self.run('hg', 'status')[0]
if retcode == 0:
self.run('hg', 'pull')
return self.run('hg', 'update', '-C', identifier)
else:
self.clone()
return self.run('hg', 'update', '-C', identifier)
|
Ayub-Khan/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/modulestore/mixed.py
|
11
|
"""
MixedModuleStore allows for aggregation between multiple modulestores.
In this way, courses can be served up by, say, either XMLModuleStore or MongoModuleStore
"""
import logging
from contextlib import contextmanager
import itertools
import functools
from contracts import contract, new_contract
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, AssetKey
from opaque_keys.edx.locator import LibraryLocator
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.assetstore import AssetMetadata
from . import ModuleStoreWriteBase
from . import ModuleStoreEnum
from .exceptions import ItemNotFoundError, DuplicateCourseError
from .draft_and_published import ModuleStoreDraftAndPublished
from .split_migrator import SplitMigrator
new_contract('CourseKey', CourseKey)
new_contract('AssetKey', AssetKey)
new_contract('AssetMetadata', AssetMetadata)
new_contract('LibraryLocator', LibraryLocator)
new_contract('long', long)
log = logging.getLogger(__name__)
def strip_key(func):
"""
A decorator for stripping version and branch information from return values that are, or contain, UsageKeys or
CourseKeys.
Additionally, the decorated function is called with an optional 'field_decorator' parameter that can be used
to strip any location(-containing) fields, which are not directly returned by the function.
The behavior can be controlled by passing 'remove_version' and 'remove_branch' booleans to the decorated
function's kwargs.
"""
@functools.wraps(func)
def inner(*args, **kwargs):
"""
Supported kwargs:
remove_version - If True, calls 'version_agnostic' on all return values, including those in lists and dicts.
remove_branch - If True, calls 'for_branch(None)' on all return values, including those in lists and dicts.
Note: The 'field_decorator' parameter passed to the decorated function is a function that honors the
values of these kwargs.
"""
# remove version and branch, by default
rem_vers = kwargs.pop('remove_version', True)
rem_branch = kwargs.pop('remove_branch', True)
# helper function for stripping individual values
def strip_key_func(val):
"""
Strips the version and branch information according to the settings of rem_vers and rem_branch.
Recursively calls this function if the given value has a 'location' attribute.
"""
retval = val
if rem_vers and hasattr(retval, 'version_agnostic'):
retval = retval.version_agnostic()
if rem_branch and hasattr(retval, 'for_branch'):
retval = retval.for_branch(None)
if hasattr(retval, 'location'):
retval.location = strip_key_func(retval.location)
return retval
# function for stripping both, collection of, and individual, values
def strip_key_collection(field_value):
"""
Calls strip_key_func for each element in the given value.
"""
if rem_vers or rem_branch:
if isinstance(field_value, list):
field_value = [strip_key_func(fv) for fv in field_value]
elif isinstance(field_value, dict):
for key, val in field_value.iteritems():
field_value[key] = strip_key_func(val)
else:
field_value = strip_key_func(field_value)
return field_value
# call the decorated function
retval = func(field_decorator=strip_key_collection, *args, **kwargs)
# strip the return value
return strip_key_collection(retval)
return inner
class MixedModuleStore(ModuleStoreDraftAndPublished, ModuleStoreWriteBase):
"""
ModuleStore knows how to route requests to the right persistence ms
"""
def __init__(
self,
contentstore,
mappings,
stores,
i18n_service=None,
fs_service=None,
user_service=None,
create_modulestore_instance=None,
signal_handler=None,
**kwargs
):
"""
Initialize a MixedModuleStore. Here we look into our passed in kwargs which should be a
collection of other modulestore configuration information
"""
super(MixedModuleStore, self).__init__(contentstore, **kwargs)
if create_modulestore_instance is None:
raise ValueError('MixedModuleStore constructor must be passed a create_modulestore_instance function')
self.modulestores = []
self.mappings = {}
for course_id, store_name in mappings.iteritems():
try:
self.mappings[CourseKey.from_string(course_id)] = store_name
except InvalidKeyError:
try:
self.mappings[SlashSeparatedCourseKey.from_deprecated_string(course_id)] = store_name
except InvalidKeyError:
log.exception("Invalid MixedModuleStore configuration. Unable to parse course_id %r", course_id)
continue
for store_settings in stores:
key = store_settings['NAME']
is_xml = 'XMLModuleStore' in store_settings['ENGINE']
if is_xml:
# restrict xml to only load courses in mapping
store_settings['OPTIONS']['course_ids'] = [
course_key.to_deprecated_string()
for course_key, store_key in self.mappings.iteritems()
if store_key == key
]
store = create_modulestore_instance(
store_settings['ENGINE'],
self.contentstore,
store_settings.get('DOC_STORE_CONFIG', {}),
store_settings.get('OPTIONS', {}),
i18n_service=i18n_service,
fs_service=fs_service,
user_service=user_service,
signal_handler=signal_handler,
)
# replace all named pointers to the store into actual pointers
for course_key, store_name in self.mappings.iteritems():
if store_name == key:
self.mappings[course_key] = store
self.modulestores.append(store)
def _clean_locator_for_mapping(self, locator):
"""
In order for mapping to work, the locator must be minimal--no version, no branch--
as we never store one version or one branch in one ms and another in another ms.
:param locator: the CourseKey
"""
if hasattr(locator, 'version_agnostic'):
locator = locator.version_agnostic()
if hasattr(locator, 'branch'):
locator = locator.replace(branch=None)
return locator
def _get_modulestore_for_courselike(self, locator=None):
"""
For a given locator, look in the mapping table and see if it has been pinned
to a particular modulestore
If locator is None, returns the first (ordered) store as the default
"""
if locator is not None:
locator = self._clean_locator_for_mapping(locator)
mapping = self.mappings.get(locator, None)
if mapping is not None:
return mapping
else:
if isinstance(locator, LibraryLocator):
has_locator = lambda store: hasattr(store, 'has_library') and store.has_library(locator)
else:
has_locator = lambda store: store.has_course(locator)
for store in self.modulestores:
if has_locator(store):
self.mappings[locator] = store
return store
# return the default store
return self.default_modulestore
def _get_modulestore_by_type(self, modulestore_type):
"""
This method should only really be used by tests and migration scripts when necessary.
Returns the module store as requested by type. The type can be a value from ModuleStoreEnum.Type.
"""
for store in self.modulestores:
if store.get_modulestore_type() == modulestore_type:
return store
return None
def fill_in_run(self, course_key):
"""
Some course_keys are used without runs. This function calls the corresponding
fill_in_run function on the appropriate modulestore.
"""
store = self._get_modulestore_for_courselike(course_key)
if not hasattr(store, 'fill_in_run'):
return course_key
return store.fill_in_run(course_key)
def has_item(self, usage_key, **kwargs):
"""
Does the course include the xblock whose id is referenced?
"""
store = self._get_modulestore_for_courselike(usage_key.course_key)
return store.has_item(usage_key, **kwargs)
@strip_key
def get_item(self, usage_key, depth=0, **kwargs):
"""
see parent doc
"""
store = self._get_modulestore_for_courselike(usage_key.course_key)
return store.get_item(usage_key, depth, **kwargs)
@strip_key
def get_items(self, course_key, **kwargs):
"""
Returns:
list of XModuleDescriptor instances for the matching items within the course with
the given course_key
NOTE: don't use this to look for courses
as the course_key is required. Use get_courses.
Args:
course_key (CourseKey): the course identifier
kwargs:
settings (dict): fields to look for which have settings scope. Follows same syntax
and rules as kwargs below
content (dict): fields to look for which have content scope. Follows same syntax and
rules as kwargs below.
qualifiers (dict): what to look for within the course.
Common qualifiers are ``category`` or any field name. If the target field is a list,
then it searches for the given value in the list, not for list equivalence.
For substring matching, pass a regex object.
For some modulestores, ``name`` is another commonly provided key (Location based stores)
For some modulestores,
you can search by ``edited_by``, ``edited_on`` providing either a datetime for == (probably
useless) or a function accepting one arg to do inequality
"""
if not isinstance(course_key, CourseKey):
raise Exception("Must pass in a course_key when calling get_items()")
store = self._get_modulestore_for_courselike(course_key)
return store.get_items(course_key, **kwargs)
@strip_key
def get_course_summaries(self, **kwargs):
"""
Returns a list containing the course information in CourseSummary objects.
Information contains `location`, `display_name`, `locator` of the courses in this modulestore.
"""
course_summaries = {}
for store in self.modulestores:
for course_summary in store.get_course_summaries(**kwargs):
course_id = self._clean_locator_for_mapping(locator=course_summary.id)
# Check if course is indeed unique. Save it in result if unique
if course_id in course_summaries:
log.warning(
u"Modulestore %s have duplicate courses %s; skipping from result.", store, course_id
)
else:
course_summaries[course_id] = course_summary
return course_summaries.values()
@strip_key
def get_courses(self, **kwargs):
'''
Returns a list containing the top level XModuleDescriptors of the courses in this modulestore.
'''
courses = {}
for store in self.modulestores:
# filter out ones which were fetched from earlier stores but locations may not be ==
for course in store.get_courses(**kwargs):
course_id = self._clean_locator_for_mapping(course.id)
if course_id not in courses:
# course is indeed unique. save it in result
courses[course_id] = course
return courses.values()
@strip_key
def get_libraries(self, **kwargs):
"""
Returns a list containing the top level XBlock of the libraries (LibraryRoot) in this modulestore.
"""
libraries = {}
for store in self.modulestores:
if not hasattr(store, 'get_libraries'):
continue
# filter out ones which were fetched from earlier stores but locations may not be ==
for library in store.get_libraries(**kwargs):
library_id = self._clean_locator_for_mapping(library.location)
if library_id not in libraries:
# library is indeed unique. save it in result
libraries[library_id] = library
return libraries.values()
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
# If there is a mapping that match this org/course/run, use that
for course_id, store in self.mappings.iteritems():
candidate_key = store.make_course_key(org, course, run)
if candidate_key == course_id:
return candidate_key
# Otherwise, return the key created by the default store
return self.default_modulestore.make_course_key(org, course, run)
def make_course_usage_key(self, course_key):
"""
Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for the modulestore
that matches the supplied course_key.
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
return store.make_course_usage_key(course_key)
@strip_key
def get_course(self, course_key, depth=0, **kwargs):
"""
returns the course module associated with the course_id. If no such course exists,
it returns None
:param course_key: must be a CourseKey
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
try:
return store.get_course(course_key, depth=depth, **kwargs)
except ItemNotFoundError:
return None
@strip_key
@contract(library_key='LibraryLocator')
def get_library(self, library_key, depth=0, **kwargs):
"""
returns the library block associated with the given key. If no such library exists,
it returns None
:param library_key: must be a LibraryLocator
"""
try:
store = self._verify_modulestore_support(library_key, 'get_library')
return store.get_library(library_key, depth=depth, **kwargs)
except NotImplementedError:
log.exception("Modulestore configured for %s does not have get_library method", library_key)
return None
except ItemNotFoundError:
return None
@strip_key
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
Args:
* course_id (CourseKey)
* ignore_case (bool): If True, do a case insensitive search. If
False, do a case sensitive search
"""
assert isinstance(course_id, CourseKey)
store = self._get_modulestore_for_courselike(course_id)
return store.has_course(course_id, ignore_case, **kwargs)
def delete_course(self, course_key, user_id):
"""
See xmodule.modulestore.__init__.ModuleStoreWrite.delete_course
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
return store.delete_course(course_key, user_id)
@contract(asset_metadata='AssetMetadata', user_id='int|long', import_only=bool)
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves the asset metadata for a particular course's asset.
Args:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
store = self._get_modulestore_for_courselike(asset_metadata.asset_id.course_key)
return store.save_asset_metadata(asset_metadata, user_id, import_only)
@contract(asset_metadata_list='list(AssetMetadata)', user_id='int|long', import_only=bool)
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
"""
Saves the asset metadata for each asset in a list of asset metadata.
Optimizes the saving of many assets.
Args:
asset_metadata_list (list(AssetMetadata)): list of data about several course assets
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
if len(asset_metadata_list) == 0:
return True
store = self._get_modulestore_for_courselike(asset_metadata_list[0].asset_id.course_key)
return store.save_asset_metadata_list(asset_metadata_list, user_id, import_only)
@strip_key
@contract(asset_key='AssetKey')
def find_asset_metadata(self, asset_key, **kwargs):
"""
Find the metadata for a particular course asset.
Args:
asset_key (AssetKey): locator containing original asset filename
Returns:
asset metadata (AssetMetadata) -or- None if not found
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.find_asset_metadata(asset_key, **kwargs)
@strip_key
@contract(course_key='CourseKey', asset_type='None | basestring', start=int, maxresults=int, sort='tuple|None')
def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs):
"""
Returns a list of static assets for a course.
By default all assets are returned, but start and maxresults can be provided to limit the query.
Args:
course_key (CourseKey): course identifier
asset_type (str): type of asset, such as 'asset', 'video', etc. If None, return assets of all types.
start (int): optional - start at this asset number
maxresults (int): optional - return at most this many, -1 means no limit
sort (array): optional - None means no sort
(sort_by (str), sort_order (str))
sort_by - one of 'uploadDate' or 'displayname'
sort_order - one of 'ascending' or 'descending'
Returns:
List of AssetMetadata objects.
"""
store = self._get_modulestore_for_courselike(course_key)
return store.get_all_asset_metadata(course_key, asset_type, start, maxresults, sort, **kwargs)
@contract(asset_key='AssetKey', user_id='int|long')
def delete_asset_metadata(self, asset_key, user_id):
"""
Deletes a single asset's metadata.
Arguments:
asset_id (AssetKey): locator containing original asset filename
user_id (int_long): user deleting the metadata
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.delete_asset_metadata(asset_key, user_id)
@contract(source_course_key='CourseKey', dest_course_key='CourseKey', user_id='int|long')
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
user_id (int|long): user copying the asset metadata
"""
source_store = self._get_modulestore_for_courselike(source_course_key)
dest_store = self._get_modulestore_for_courselike(dest_course_key)
if source_store != dest_store:
with self.bulk_operations(dest_course_key):
# Get all the asset metadata in the source course.
all_assets = source_store.get_all_asset_metadata(source_course_key, 'asset')
# Store it all in the dest course.
for asset in all_assets:
new_asset_key = dest_course_key.make_asset_key('asset', asset.asset_id.path)
copied_asset = AssetMetadata(new_asset_key)
copied_asset.from_storable(asset.to_storable())
dest_store.save_asset_metadata(copied_asset, user_id)
else:
# Courses in the same modulestore can be handled by the modulestore itself.
source_store.copy_all_asset_metadata(source_course_key, dest_course_key, user_id)
@contract(asset_key='AssetKey', attr=str, user_id='int|long')
def set_asset_metadata_attr(self, asset_key, attr, value, user_id):
"""
Add/set the given attr on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr (str): which attribute to set
value: the value to set it to (any type pymongo accepts such as datetime, number, string)
user_id: (int|long): user setting the attribute
Raises:
NotFoundError if no such item exists
AttributeError if attr is one of the built-in attrs.
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.set_asset_metadata_attrs(asset_key, {attr: value}, user_id)
@contract(asset_key='AssetKey', attr_dict=dict, user_id='int|long')
def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
"""
Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr_dict (dict): attribute/value pairs to set
user_id: (int|long): user setting the attributes
Raises:
NotFoundError if no such item exists
AttributeError if attr is one of the built-in attrs.
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.set_asset_metadata_attrs(asset_key, attr_dict, user_id)
@strip_key
def get_parent_location(self, location, **kwargs):
"""
returns the parent locations for a given location
"""
store = self._get_modulestore_for_courselike(location.course_key)
return store.get_parent_location(location, **kwargs)
def get_block_original_usage(self, usage_key):
"""
If a block was inherited into another structure using copy_from_template,
this will return the original block usage locator from which the
copy was inherited.
"""
try:
store = self._verify_modulestore_support(usage_key.course_key, 'get_block_original_usage')
return store.get_block_original_usage(usage_key)
except NotImplementedError:
return None, None
def get_modulestore_type(self, course_id):
"""
Returns a type which identifies which modulestore is servicing the given course_id.
The return can be one of:
"xml" (for XML based courses),
"mongo" for old-style MongoDB backed courses,
"split" for new-style split MongoDB backed courses.
"""
return self._get_modulestore_for_courselike(course_id).get_modulestore_type()
@strip_key
def get_orphans(self, course_key, **kwargs):
"""
Get all of the xblocks in the given course which have no parents and are not of types which are
usually orphaned. NOTE: may include xblocks which still have references via xblocks which don't
use children to point to their dependents.
"""
store = self._get_modulestore_for_courselike(course_key)
return store.get_orphans(course_key, **kwargs)
def get_errored_courses(self):
"""
Return a dictionary of course_dir -> [(msg, exception_str)], for each
course_dir where course loading failed.
"""
errs = {}
for store in self.modulestores:
errs.update(store.get_errored_courses())
return errs
@strip_key
def create_course(self, org, course, run, user_id, **kwargs):
"""
Creates and returns the course.
Args:
org (str): the organization that owns the course
course (str): the name of the course
run (str): the name of the run
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a CourseDescriptor
"""
# first make sure an existing course doesn't already exist in the mapping
course_key = self.make_course_key(org, course, run)
if course_key in self.mappings and self.mappings[course_key].has_course(course_key):
raise DuplicateCourseError(course_key, course_key)
# create the course
store = self._verify_modulestore_support(None, 'create_course')
course = store.create_course(org, course, run, user_id, **kwargs)
# add new course to the mapping
self.mappings[course_key] = store
return course
@strip_key
def create_library(self, org, library, user_id, fields, **kwargs):
"""
Creates and returns a new library.
Args:
org (str): the organization that owns the course
library (str): the code/number/name of the library
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization - e.g. display_name
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a LibraryRoot
"""
# first make sure an existing course/lib doesn't already exist in the mapping
lib_key = LibraryLocator(org=org, library=library)
if lib_key in self.mappings:
raise DuplicateCourseError(lib_key, lib_key)
# create the library
store = self._verify_modulestore_support(None, 'create_library')
library = store.create_library(org, library, user_id, fields, **kwargs)
# add new library to the mapping
self.mappings[lib_key] = store
return library
@strip_key
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
"""
See the superclass for the general documentation.
If cloning w/in a store, delegates to that store's clone_course which, in order to be self-
sufficient, should handle the asset copying (call the same method as this one does)
If cloning between stores,
* copy the assets
* migrate the courseware
"""
source_modulestore = self._get_modulestore_for_courselike(source_course_id)
# for a temporary period of time, we may want to hardcode dest_modulestore as split if there's a split
# to have only course re-runs go to split. This code, however, uses the config'd priority
dest_modulestore = self._get_modulestore_for_courselike(dest_course_id)
if source_modulestore == dest_modulestore:
return source_modulestore.clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
if dest_modulestore.get_modulestore_type() == ModuleStoreEnum.Type.split:
split_migrator = SplitMigrator(dest_modulestore, source_modulestore)
split_migrator.migrate_mongo_course(source_course_id, user_id, dest_course_id.org,
dest_course_id.course, dest_course_id.run, fields, **kwargs)
# the super handles assets and any other necessities
super(MixedModuleStore, self).clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
else:
raise NotImplementedError("No code for cloning from {} to {}".format(
source_modulestore, dest_modulestore
))
@strip_key
def create_item(self, user_id, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new item in a course.
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create
this item in
block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
modulestore = self._verify_modulestore_support(course_key, 'create_item')
return modulestore.create_item(user_id, course_key, block_type, block_id=block_id, fields=fields, **kwargs)
@strip_key
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new xblock that is a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
block that this item should be parented under
block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
modulestore = self._verify_modulestore_support(parent_usage_key.course_key, 'create_child')
return modulestore.create_child(user_id, parent_usage_key, block_type, block_id=block_id, fields=fields, **kwargs)
@strip_key
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
"""
See :py:meth `ModuleStoreDraftAndPublished.import_xblock`
Defer to the course's modulestore if it supports this method
"""
store = self._verify_modulestore_support(course_key, 'import_xblock')
return store.import_xblock(user_id, course_key, block_type, block_id, fields, runtime)
@strip_key
def copy_from_template(self, source_keys, dest_key, user_id, **kwargs):
"""
See :py:meth `SplitMongoModuleStore.copy_from_template`
"""
store = self._verify_modulestore_support(dest_key.course_key, 'copy_from_template')
return store.copy_from_template(source_keys, dest_key, user_id)
@strip_key
def update_item(self, xblock, user_id, allow_not_found=False, **kwargs):
"""
Update the xblock persisted to be the same as the given for all types of fields
(content, children, and metadata) attribute the change to the given user.
"""
store = self._verify_modulestore_support(xblock.location.course_key, 'update_item')
return store.update_item(xblock, user_id, allow_not_found, **kwargs)
@strip_key
def delete_item(self, location, user_id, **kwargs):
"""
Delete the given item from persistence. kwargs allow modulestore specific parameters.
"""
store = self._verify_modulestore_support(location.course_key, 'delete_item')
return store.delete_item(location, user_id=user_id, **kwargs)
def revert_to_published(self, location, user_id):
"""
Reverts an item to its last published version (recursively traversing all of its descendants).
If no published version exists, an InvalidVersionError is thrown.
If a published version exists but there is no draft version of this item or any of its descendants, this
method is a no-op.
:raises InvalidVersionError: if no published version exists for the location specified
"""
store = self._verify_modulestore_support(location.course_key, 'revert_to_published')
return store.revert_to_published(location, user_id)
def close_all_connections(self):
"""
Close all db connections
"""
for modulestore in self.modulestores:
modulestore.close_connections()
def _drop_database(self):
"""
A destructive operation to drop all databases and close all db connections.
Intended to be used by test code for cleanup.
"""
for modulestore in self.modulestores:
# drop database if the store supports it (read-only stores do not)
if hasattr(modulestore, '_drop_database'):
modulestore._drop_database() # pylint: disable=protected-access
@strip_key
def create_xblock(self, runtime, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Create the new xmodule but don't save it. Returns the new module.
Args:
runtime: :py:class `xblock.runtime` from another xblock in the same course. Providing this
significantly speeds up processing (inheritance and subsequent persistence)
course_key: :py:class `opaque_keys.CourseKey`
block_type: :py:class `string`: the string identifying the xblock type
block_id: the string uniquely identifying the block within the given course
fields: :py:class `dict` field_name, value pairs for initializing the xblock fields. Values
should be the pythonic types not the json serialized ones.
"""
store = self._verify_modulestore_support(course_key, 'create_xblock')
return store.create_xblock(runtime, course_key, block_type, block_id, fields or {}, **kwargs)
@strip_key
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
courses = []
for modulestore in self.modulestores:
courses.extend(modulestore.get_courses_for_wiki(wiki_slug, **kwargs))
return courses
def heartbeat(self):
"""
Delegate to each modulestore and package the results for the caller.
"""
# could be done in parallel threads if needed
return dict(
itertools.chain.from_iterable(
store.heartbeat().iteritems()
for store in self.modulestores
)
)
def has_published_version(self, xblock):
"""
Returns True if this xblock has a published version (i.e. content that has been
deployed to the LMS), regardless of whether a newer draft also exists; otherwise False.
"""
course_id = xblock.scope_ids.usage_id.course_key
store = self._get_modulestore_for_courselike(course_id)
return store.has_published_version(xblock)
@strip_key
def publish(self, location, user_id, **kwargs):
"""
Save a current draft to the underlying modulestore
Returns the newly published item.
"""
store = self._verify_modulestore_support(location.course_key, 'publish')
return store.publish(location, user_id, **kwargs)
@strip_key
def unpublish(self, location, user_id, **kwargs):
"""
Remove the published version of the item from the underlying modulestore.
Returns the newly unpublished item.
"""
store = self._verify_modulestore_support(location.course_key, 'unpublish')
return store.unpublish(location, user_id, **kwargs)
def convert_to_draft(self, location, user_id):
"""
Create a copy of the source and mark its revision as draft.
Note: This method is to support the Mongo Modulestore and may be deprecated.
:param location: the location of the source (its revision must be None)
"""
store = self._verify_modulestore_support(location.course_key, 'convert_to_draft')
return store.convert_to_draft(location, user_id)
def has_changes(self, xblock):
"""
Checks if the given block has unpublished changes
:param xblock: the block to check
:return: True if the draft and published versions differ
"""
store = self._verify_modulestore_support(xblock.location.course_key, 'has_changes')
return store.has_changes(xblock)
def check_supports(self, course_key, method):
"""
Verifies that the modulestore for a particular course supports a feature.
Returns True/false based on this.
"""
try:
self._verify_modulestore_support(course_key, method)
return True
except NotImplementedError:
return False
def _verify_modulestore_support(self, course_key, method):
"""
Finds and returns the store that contains the course for the given location, and verifies
that the store supports the given method.
Raises NotImplementedError if the found store does not support the given method.
"""
store = self._get_modulestore_for_courselike(course_key)
if hasattr(store, method):
return store
else:
raise NotImplementedError(u"Cannot call {} on store {}".format(method, store))
@property
def default_modulestore(self):
"""
Return the default modulestore
"""
thread_local_default_store = getattr(self.thread_cache, 'default_store', None)
if thread_local_default_store:
# return the thread-local cache, if found
return thread_local_default_store
else:
# else return the default store
return self.modulestores[0]
@contextmanager
def default_store(self, store_type):
"""
A context manager for temporarily changing the default store in the Mixed modulestore to the given store type
"""
# find the store corresponding to the given type
store = next((store for store in self.modulestores if store.get_modulestore_type() == store_type), None)
if not store:
raise Exception(u"Cannot find store of type {}".format(store_type))
prev_thread_local_store = getattr(self.thread_cache, 'default_store', None)
try:
self.thread_cache.default_store = store
yield
finally:
self.thread_cache.default_store = prev_thread_local_store
@contextmanager
def branch_setting(self, branch_setting, course_id=None):
"""
A context manager for temporarily setting the branch value for the given course's store
to the given branch_setting. If course_id is None, the default store is used.
"""
store = self._verify_modulestore_support(course_id, 'branch_setting')
with store.branch_setting(branch_setting, course_id):
yield
@contextmanager
def bulk_operations(self, course_id, emit_signals=True):
"""
A context manager for notifying the store of bulk operations.
If course_id is None, the default store is used.
"""
store = self._get_modulestore_for_courselike(course_id)
with store.bulk_operations(course_id, emit_signals):
yield
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
for store in self.modulestores:
store.ensure_indexes()
|
mmatyas/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/websockets/handlers/echo_close_data_wsh.py
|
258
|
#!/usr/bin/python
from mod_pywebsocket import msgutil
_GOODBYE_MESSAGE = u'Goodbye'
def web_socket_do_extra_handshake(request):
# This example handler accepts any request. See origin_check_wsh.py for how
# to reject access from untrusted scripts based on origin value.
pass # Always accept.
def web_socket_transfer_data(request):
while True:
line = request.ws_stream.receive_message()
if line is None:
return
if isinstance(line, unicode):
if line == _GOODBYE_MESSAGE:
return
request.ws_stream.send_message(line, binary=False)
|
eleonrk/SickRage
|
refs/heads/master
|
lib/hachoir_parser/template.py
|
58
|
"""
====================== 8< ============================
This file is an Hachoir parser template. Make a copy
of it, and adapt it to your needs.
You have to replace all "TODO" with you code.
====================== 8< ============================
TODO parser.
Author: TODO TODO
Creation date: YYYY-mm-DD
"""
# TODO: Just keep what you need
from hachoir_parser import Parser
from hachoir_core.field import (ParserError,
UInt8, UInt16, UInt32, String, RawBytes)
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
class TODOFile(Parser):
PARSER_TAGS = {
"id": "TODO",
"category": "TODO", # "archive", "audio", "container", ...
"file_ext": ("TODO",), # TODO: Example ("bmp",) to parse the file "image.bmp"
"mime": (u"TODO",), # TODO: Example: "image/png"
"min_size": 0, # TODO: Minimum file size (x bits, or x*8 in bytes)
"description": "TODO", # TODO: Example: "A bitmap picture"
}
# TODO: Choose between little or big endian
# endian = LITTLE_ENDIAN
# endian = BIG_ENDIAN
def validate(self):
# TODO: Check that file looks like your format
# Example: check first two bytes
# return (self.stream.readBytes(0, 2) == 'BM')
return False
def createFields(self):
# TODO: Write your parser using this model:
# yield UInt8(self, "name1", "description1")
# yield UInt16(self, "name2", "description2")
# yield UInt32(self, "name3", "description3")
# yield String(self, "name4", 1, "description4") # TODO: add ", charset="ASCII")"
# yield String(self, "name5", 1, "description5", charset="ASCII")
# yield String(self, "name6", 1, "description6", charset="ISO-8859-1")
# Read rest of the file (if any)
# TODO: You may remove this code
if self.current_size < self._size:
yield self.seekBit(self._size, "end")
|
zaffra/Donate
|
refs/heads/master
|
django/conf/locale/ru/formats.py
|
80
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
DATE_FORMAT = 'j F Y г.'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j F Y г. G:i:s'
YEAR_MONTH_FORMAT = 'F Y г.'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
DATE_INPUT_FORMATS = (
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y', # '25.10.06'
'%Y-%m-%d', # '2006-10-25'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
|
pombredanne/tahoe-lafs
|
refs/heads/master
|
misc/simulators/hashbasedsig.py
|
8
|
#!python
# range of hash output lengths
range_L_hash = [128]
lg_M = 53 # lg(required number of signatures before losing security)
limit_bytes = 480000 # limit on signature length
limit_cost = 500 # limit on Mcycles_Sig + weight_ver*Mcycles_ver
weight_ver = 1 # how important verification cost is relative to signature cost
# (note: setting this too high will just exclude useful candidates)
L_block = 512 # bitlength of hash input blocks
L_pad = 64 # bitlength of hash padding overhead (for M-D hashes)
L_label = 80 # bitlength of hash position label
L_prf = 256 # bitlength of hash output when used as a PRF
cycles_per_byte = 15.8 # cost of hash
Mcycles_per_block = cycles_per_byte * L_block / (8 * 1000000.0)
from math import floor, ceil, log, log1p, pow, e
from sys import stderr
from gc import collect
def lg(x):
return log(x, 2)
def ln(x):
return log(x, e)
def ceil_log(x, B):
return int(ceil(log(x, B)))
def ceil_div(x, y):
return int(ceil(float(x) / float(y)))
def floor_div(x, y):
return int(floor(float(x) / float(y)))
# number of compression function evaluations to hash k bits
# we assume that there is a label in each block
def compressions(k):
return ceil_div(k + L_pad, L_block - L_label)
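# e.g. with the defaults above (L_block=512, L_pad=64, L_label=80):
# compressions(256) == ceil((256 + 64) / 432) == 1, and compressions(512) == 2.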
# sum of power series sum([pow(p, i) for i in range(n)])
def sum_powers(p, n):
if p == 1: return n
return (pow(p, n) - 1)/(p - 1)
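# e.g. sum_powers(3, 4) == 1 + 3 + 9 + 27 == 40 == (3**4 - 1)//(3 - 1)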
def make_candidate(B, K, K1, K2, q, T, T_min, L_hash, lg_N, sig_bytes, c_sign, c_ver, c_ver_pm):
Mcycles_sign = c_sign * Mcycles_per_block
Mcycles_ver = c_ver * Mcycles_per_block
Mcycles_ver_pm = c_ver_pm * Mcycles_per_block
cost = Mcycles_sign + weight_ver*Mcycles_ver
if sig_bytes >= limit_bytes or cost > limit_cost:
return []
return [{
'B': B, 'K': K, 'K1': K1, 'K2': K2, 'q': q, 'T': T,
'T_min': T_min,
'L_hash': L_hash,
'lg_N': lg_N,
'sig_bytes': sig_bytes,
'c_sign': c_sign,
'Mcycles_sign': Mcycles_sign,
'c_ver': c_ver,
'c_ver_pm': c_ver_pm,
'Mcycles_ver': Mcycles_ver,
'Mcycles_ver_pm': Mcycles_ver_pm,
'cost': cost,
}]
# K1 = size of root Merkle tree
# K = size of middle Merkle trees
# K2 = size of leaf Merkle trees
# q = number of revealed private keys per signed message
# Winternitz with B < 4 is never optimal. For example, going from B=4 to B=2 halves the
# chain depth, but that is cancelled out by doubling (roughly) the number of digits.
range_B = xrange(4, 33)
M = pow(2, lg_M)
def calculate(K, K1, K2, q_max, L_hash, trees):
candidates = []
lg_K = lg(K)
lg_K1 = lg(K1)
lg_K2 = lg(K2)
# We want the optimal combination of q and T. That takes too much time and memory
# to search for directly, so we start by calculating the lowest possible value of T
# for any q. Then for potential values of T, we calculate the smallest q such that we
# will have at least L_hash bits of security against forgery using revealed private keys
# (i.e. this method of forgery is no easier than finding a hash preimage), provided
# that fewer than 2^lg_S_min messages are signed.
# min height of certification tree (excluding root and bottom layer)
T_min = ceil_div(lg_M - lg_K1, lg_K)
last_q = None
for T in xrange(T_min, T_min+21):
# lg(total number of leaf private keys)
lg_S = lg_K1 + lg_K*T
lg_N = lg_S + lg_K2
# Suppose that m signatures have been made. The number of times X that a given bucket has
# been chosen follows a binomial distribution B(m, p) where p = 1/S and S is the number of
# buckets. I.e. Pr(X = x) = C(m, x) * p^x * (1-p)^(m-x).
#
# If an attacker picks a random seed and message that falls into a bucket that has been
# chosen x times, then at most q*x private values in that bucket have been revealed, so
# (ignoring the possibility of guessing private keys, which is negligible) the attacker's
# success probability for a forgery using the revealed values is at most min(1, q*x / K2)^q.
#
# Let j = floor(K2/q). Conditioning on x, we have
#
# Pr(forgery) = sum_{x = 0..j}(Pr(X = x) * (q*x / K2)^q) + Pr(x > j)
# = sum_{x = 1..j}(Pr(X = x) * (q*x / K2)^q) + Pr(x > j)
#
# We lose nothing by approximating (q*x / K2)^q as 1 for x > 4, i.e. ignoring the resistance
# of the HORS scheme to forgery when a bucket has been chosen 5 or more times.
#
# Pr(forgery) < sum_{x = 1..4}(Pr(X = x) * (q*x / K2)^q) + Pr(x > 4)
#
# where Pr(x > 4) = 1 - sum_{x = 0..4}(Pr(X = x))
#
# We use log arithmetic here because values very close to 1 cannot be represented accurately
# in floating point, but their logarithms can (provided we use appropriate functions such as
# log1p).
lg_p = -lg_S
lg_1_p = log1p(-pow(2, lg_p))/ln(2) # lg(1-p), computed accurately
j = 5
lg_px = [lg_1_p * M]*j
# We approximate lg(M-x) as lg(M)
lg_px_step = lg_M + lg_p - lg_1_p
for x in xrange(1, j):
lg_px[x] = lg_px[x-1] - lg(x) + lg_px_step
q = None
# Find the minimum acceptable value of q.
for q_cand in xrange(1, q_max+1):
lg_q = lg(q_cand)
lg_pforge = [lg_px[x] + (lg_q*x - lg_K2)*q_cand for x in xrange(1, j)]
if max(lg_pforge) < -L_hash + lg(j) and lg_px[j-1] + 1.0 < -L_hash:
#print "K = %d, K1 = %d, K2 = %d, L_hash = %d, lg_K2 = %.3f, q = %d, lg_pforge_1 = %.3f, lg_pforge_2 = %.3f, lg_pforge_3 = %.3f" \
# % (K, K1, K2, L_hash, lg_K2, q, lg_pforge_1, lg_pforge_2, lg_pforge_3)
q = q_cand
break
if q is None or q == last_q:
# if q hasn't decreased, this will be strictly worse than the previous candidate
continue
last_q = q
# number of compressions to compute the Merkle hashes
(h_M, c_M, _) = trees[K]
(h_M1, c_M1, _) = trees[K1]
(h_M2, c_M2, (dau, tri)) = trees[K2]
# B = generalized Winternitz base
for B in range_B:
# n is the number of digits needed to sign the message representative and checksum.
# The representation is base-B, except that we allow the most significant digit
# to be up to 2B-1.
n_L = ceil_div(L_hash-1, lg(B))
firstL_max = floor_div(pow(2, L_hash)-1, pow(B, n_L-1))
C_max = firstL_max + (n_L-1)*(B-1)
n_C = ceil_log(ceil_div(C_max, 2), B)
n = n_L + n_C
firstC_max = floor_div(C_max, pow(B, n_C-1))
# Total depth of Winternitz hash chains. The chains for the most significant
# digit of the message representative and of the checksum may be a different
# length to those for the other digits.
c_D = (n-2)*(B-1) + firstL_max + firstC_max
# number of compressions to hash a Winternitz public key
c_W = compressions(n*L_hash)
# bitlength of a single Winternitz signature and authentication path
L_MW = (n + h_M ) * L_hash
L_MW1 = (n + h_M1) * L_hash
# bitlength of the HORS signature and authentication paths
# For all but one of the q authentication paths, one of the sibling elements in
# another path is made redundant where they intersect. This cancels out the hash
# that would otherwise be needed at the bottom of the path, making the total
# length of the signature q*h_M2 + 1 hashes, rather than q*(h_M2 + 1).
L_leaf = (q*h_M2 + 1) * L_hash
# length of the overall GMSS+HORS signature and seeds
sig_bytes = ceil_div(L_MW1 + T*L_MW + L_leaf + L_prf + ceil(lg_N), 8)
c_MW = K *(c_D + c_W) + c_M + ceil_div(K *n*L_hash, L_prf)
c_MW1 = K1*(c_D + c_W) + c_M1 + ceil_div(K1*n*L_hash, L_prf)
# For simplicity, c_sign and c_ver don't take into account compressions saved
# as a result of intersecting authentication paths in the HORS signature, so
# are slight overestimates.
c_sign = c_MW1 + T*c_MW + q*(c_M2 + 1) + ceil_div(K2*L_hash, L_prf)
# *expected* number of compressions to verify a signature
c_ver = c_D/2.0 + c_W + c_M1 + T*(c_D/2.0 + c_W + c_M) + q*(c_M2 + 1)
c_ver_pm = (1 + T)*c_D/2.0
candidates += make_candidate(B, K, K1, K2, q, T, T_min, L_hash, lg_N, sig_bytes, c_sign, c_ver, c_ver_pm)
return candidates
def search():
for L_hash in range_L_hash:
print >>stderr, "collecting... \r",
collect()
print >>stderr, "precomputing... \r",
"""
# d/dq (lg(q+1) + L_hash/q) = 1/(ln(2)*(q+1)) - L_hash/q^2
# Therefore lg(q+1) + L_hash/q is at a minimum when 1/(ln(2)*(q+1)) = L_hash/q^2.
# Let alpha = L_hash*ln(2), then from the quadratic formula, the integer q that
# minimizes lg(q+1) + L_hash/q is the floor or ceiling of (alpha + sqrt(alpha^2 - 4*alpha))/2.
# (We don't want the other solution near 0.)
alpha = floor(L_hash*ln(2)) # float
q = floor((alpha + sqrt(alpha*(alpha-4)))/2)
if lg(q+2) + L_hash/(q+1) < lg(q+1) + L_hash/q:
q += 1
lg_S_margin = lg(q+1) + L_hash/q
q_max = int(q)
q = floor(L_hash*ln(2)) # float
if lg(q+1) + L_hash/(q+1) < lg(q) + L_hash/q:
q += 1
lg_S_margin = lg(q) + L_hash/q
q_max = int(q)
"""
q_max = 4000
# find optimal Merkle tree shapes for this L_hash and each K
trees = {}
K_max = 50
c2 = compressions(2*L_hash)
c3 = compressions(3*L_hash)
for dau in xrange(0, 10):
a = pow(2, dau)
for tri in xrange(0, ceil_log(30-dau, 3)):
x = int(a*pow(3, tri))
h = dau + 2*tri
c_x = int(sum_powers(2, dau)*c2 + a*sum_powers(3, tri)*c3)
for y in xrange(1, x+1):
if tri > 0:
# If the bottom level has arity 3, then for every 2 nodes by which the tree is
# imperfect, we can save c3 compressions by pruning 3 leaves back to their parent.
# If the tree is imperfect by an odd number of nodes, we can prune one extra leaf,
# possibly saving a compression if c2 < c3.
c_y = c_x - floor_div(x-y, 2)*c3 - ((x-y) % 2)*(c3-c2)
else:
# If the bottom level has arity 2, then for each node by which the tree is
# imperfect, we can save c2 compressions by pruning 2 leaves back to their parent.
c_y = c_x - (x-y)*c2
if y not in trees or (h, c_y, (dau, tri)) < trees[y]:
trees[y] = (h, c_y, (dau, tri))
#for x in xrange(1, K_max+1):
# print x, trees[x]
candidates = []
progress = 0
fuzz = 0
complete = (K_max-1)*(2200-200)/100
for K in xrange(2, K_max+1):
for K2 in xrange(200, 2200, 100):
for K1 in xrange(max(2, K-fuzz), min(K_max, K+fuzz)+1):
candidates += calculate(K, K1, K2, q_max, L_hash, trees)
progress += 1
print >>stderr, "searching: %3d %% \r" % (100.0 * progress / complete,),
print >>stderr, "filtering... \r",
step = 2.0
bins = {}
limit = floor_div(limit_cost, step)
for bin in xrange(0, limit+2):
bins[bin] = []
for c in candidates:
bin = floor_div(c['cost'], step)
bins[bin] += [c]
del candidates
# For each in a range of signing times, find the best candidate.
best = []
for bin in xrange(0, limit):
candidates = bins[bin] + bins[bin+1] + bins[bin+2]
if len(candidates) > 0:
best += [min(candidates, key=lambda c: c['sig_bytes'])]
def format_candidate(candidate):
return ("%(B)3d %(K)3d %(K1)3d %(K2)5d %(q)4d %(T)4d "
"%(L_hash)4d %(lg_N)5.1f %(sig_bytes)7d "
"%(c_sign)7d (%(Mcycles_sign)7.2f) "
"%(c_ver)7d +/-%(c_ver_pm)5d (%(Mcycles_ver)5.2f +/-%(Mcycles_ver_pm)5.2f) "
) % candidate
print >>stderr, " \r",
if len(best) > 0:
print " B K K1 K2 q T L_hash lg_N sig_bytes c_sign (Mcycles) c_ver ( Mcycles )"
print "---- ---- ---- ------ ---- ---- ------ ------ --------- ------------------ --------------------------------"
best.sort(key=lambda c: (c['sig_bytes'], c['cost']))
last_sign = None
last_ver = None
for c in best:
if last_sign is None or c['c_sign'] < last_sign or c['c_ver'] < last_ver:
print format_candidate(c)
last_sign = c['c_sign']
last_ver = c['c_ver']
print
else:
print "No candidates found for L_hash = %d or higher." % (L_hash)
return
del bins
del best
print "Maximum signature size: %d bytes" % (limit_bytes,)
print "Maximum (signing + %d*verification) cost: %.1f Mcycles" % (weight_ver, limit_cost)
print "Hash parameters: %d-bit blocks with %d-bit padding and %d-bit labels, %.2f cycles per byte" \
% (L_block, L_pad, L_label, cycles_per_byte)
print "PRF output size: %d bits" % (L_prf,)
print "Security level given by L_hash is maintained for up to 2^%d signatures.\n" % (lg_M,)
search()
|
ee08b397/LeetCode-4
|
refs/heads/master
|
281 Zigzag Iterator.py
|
1
|
"""
Premium Question
"""
__author__ = 'Daniel'
class ZigzagIterator(object):
def __init__(self, v1, v2):
"""
Initialize your data structure here.
:type v1: List[int]
:type v2: List[int]
"""
self.mat = [v1, v2]
self.maxa = max((c, r) for r, c in enumerate(map(lambda x: len(x)-1, self.mat)))
self.i = 0
self.j = 0
self._reposition()
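# _reposition advances (i, j) in column order: i selects the input list and j the
# position within it, so iteration yields v1[0], v2[0], v1[1], v2[1], ... while
# skipping lists that are already exhausted; self.maxa[0] is the largest valid j.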
def _reposition(self):
while self.i >= len(self.mat) or self.j >= len(self.mat[self.i]):
if not self.hasNext():
return
elif self.i >= len(self.mat):
self.i = 0
self.j += 1
elif self.j >= len(self.mat[self.i]):
self.i += 1
def next(self):
"""
:rtype: int
"""
if not self.hasNext():
raise StopIteration
ret = self.mat[self.i][self.j]
self.i += 1
self._reposition()
return ret
def hasNext(self):
"""
:rtype: bool
"""
return self.j <= self.maxa[0]
if __name__ == "__main__":
v1 = [1, 2]
v2 = [3, 4, 5, 6]
itr = ZigzagIterator(v1, v2)
while itr.hasNext():
print itr.next()
|
Minkov/site
|
refs/heads/telerikacademy
|
event_socket_server/__init__.py
|
3
|
from .base_server import BaseServer
from .engines import *
from .handler import Handler
from .helpers import SizedPacketHandler, ZlibPacketHandler, ProxyProtocolMixin
def get_preferred_engine(choices=('epoll', 'poll', 'select')):
for choice in choices:
if choice in engines:
return engines[choice]
return engines['select']
|
Novasoft-India/OperERP-AM-Motors
|
refs/heads/master
|
openerp/addons/l10n_lu/wizard/__init__.py
|
63
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import print_vat
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Mrs-X/Darknet
|
refs/heads/master
|
test/functional/rpc_users.py
|
4
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import PivxTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (PivxTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
#Append rpcauth to pivx.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser�"
rpcpassword = "rpcpassword=rpcpassword�"
with open(os.path.join(self.options.tmpdir+"/node0", "pivx.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
with open(os.path.join(self.options.tmpdir+"/node1", "pivx.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
rpcuserauthpair = "rpcuser�:rpcpassword�"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rpcuser
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
pabloborrego93/edx-platform
|
refs/heads/master
|
common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
|
172
|
"""
Unit test for stub YouTube implementation.
"""
import unittest
import requests
from ..youtube import StubYouTubeService
class StubYouTubeServiceTest(unittest.TestCase):
def setUp(self):
super(StubYouTubeServiceTest, self).setUp()
self.server = StubYouTubeService()
self.url = "http://127.0.0.1:{0}/".format(self.server.port)
self.server.config['time_to_response'] = 0.0
self.addCleanup(self.server.shutdown)
def test_unused_url(self):
response = requests.get(self.url + 'unused_url')
self.assertEqual("Unused url", response.content)
@unittest.skip('Failing intermittently due to inconsistent responses from YT. See TE-871')
def test_video_url(self):
response = requests.get(
self.url + 'test_youtube/OEoXaMPEzfM?v=2&alt=jsonc&callback=callback_func'
)
# YouTube metadata for video `OEoXaMPEzfM` states that duration is 116.
self.assertEqual(
'callback_func({"data": {"duration": 116, "message": "I\'m youtube.", "id": "OEoXaMPEzfM"}})',
response.content
)
def test_transcript_url_equal(self):
response = requests.get(
self.url + 'test_transcripts_youtube/t__eq_exist'
)
self.assertEqual(
"".join([
'<?xml version="1.0" encoding="utf-8" ?>',
'<transcript><text start="1.0" dur="1.0">',
'Equal transcripts</text></transcript>'
]), response.content
)
def test_transcript_url_not_equal(self):
response = requests.get(
self.url + 'test_transcripts_youtube/t_neq_exist',
)
self.assertEqual(
"".join([
'<?xml version="1.0" encoding="utf-8" ?>',
'<transcript><text start="1.1" dur="5.5">',
'Transcripts sample, different that on server',
'</text></transcript>'
]), response.content
)
def test_transcript_not_found(self):
response = requests.get(self.url + 'test_transcripts_youtube/some_id')
self.assertEqual(404, response.status_code)
def test_reset_configuration(self):
reset_config_url = self.url + 'del_config'
# add some configuration data
self.server.config['test_reset'] = 'This is a reset config test'
# reset server configuration
response = requests.delete(reset_config_url)
self.assertEqual(response.status_code, 200)
# ensure that server config dict is empty after successful reset
self.assertEqual(self.server.config, {})
|
zenodo/invenio
|
refs/heads/zenodo-master
|
invenio/legacy/bibingest/storage_engine_interface.py
|
13
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""The ingestion storage engine interface."""
__revision__ = "$Id$"
class StorageEngine(object):
"""The Ingestion Storage Engine default class"""
def __init__(self, configuration):
"""
The constructor.
"""
for (name, value) in configuration.iteritems():
setattr(self, name, value)
def reconfigure(self, configuration):
"""
"""
pass
def get_one(self, uid):
"""
Returns one ingestion package based on its UID.
"""
pass
def get_many(self, kwargs):
"""
Returns many ingestion packages based on the given arguments.
"""
pass
def store_one(self, kwargs):
"""
Sets one ingestion package with the given arguments.
"""
pass
def store_many(self, data):
"""
Sets many ingestion packages, as elements on the iterable data.
"""
pass
def remove_one(self, uid):
"""
Removes one ingestion package based on its UID.
"""
pass
def remove_many(self, kwargs):
"""
Removes many ingestion packages based on the given arguments.
"""
pass
def update_one(self, specs, kwargs):
"""
Updates one ingestion package (the first one found) based on the specs
with the given arguments.
"""
pass
def update_many(self, specs, kwargs):
"""
Updates many ingestion packages found based on the specs with the
given arguments.
"""
pass
def count(self):
"""
Returns the count of total entries for the specific ingestion package.
"""
pass
|
shainer/calamares
|
refs/heads/master
|
src/modules/plymouthcfg/main.py
|
3
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# === This file is part of Calamares - <http://github.com/calamares> ===
#
# Copyright 2016, Artoo <artoo@manjaro.org>
# Copyright 2017, Alf Gaida <agaida@siduction.org>
#
# Calamares is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Calamares is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Calamares. If not, see <http://www.gnu.org/licenses/>.
import libcalamares
from libcalamares.utils import debug, target_env_call
class PlymouthController:
def __init__(self):
self.__root = libcalamares.globalstorage.value('rootMountPoint')
@property
def root(self):
return self.__root
def setTheme(self):
plymouth_theme = libcalamares.job.configuration["plymouth_theme"]
target_env_call(["sed", "-e", 's|^.*Theme=.*|Theme=' +
plymouth_theme + '|', "-i",
"/etc/plymouth/plymouthd.conf"])
def detect(self):
isPlymouth = target_env_call(["which", "plymouth"])
debug("which plymouth exit code: {!s}".format(isPlymouth))
if isPlymouth == 0:
libcalamares.globalstorage.insert("hasPlymouth", True)
else:
libcalamares.globalstorage.insert("hasPlymouth", False)
return isPlymouth
def run(self):
if self.detect() == 0:
if (("plymouth_theme" in libcalamares.job.configuration) and
(libcalamares.job.configuration["plymouth_theme"] is not None)):
self.setTheme()
return None
def run():
pc = PlymouthController()
return pc.run()
|
F-Secure/resource-api
|
refs/heads/dev
|
src/resource_api_http/__init__.py
|
12133432
| |
praekelt/unique-code-service
|
refs/heads/develop
|
unique_code_service/tests/helpers.py
|
1
|
from uuid import uuid4
def populate_pool(pool, flavours, suffixes):
return pool.import_unique_codes(str(uuid4()), 'md5', [
{
'flavour': flavour,
'unique_code': '%s%s' % (flavour, suffix),
}
for flavour in flavours
for suffix in suffixes
])
def mk_audit_params(request_id, transaction_id=None, user_id=None):
if transaction_id is None:
transaction_id = 'tx-%s' % (request_id,)
if user_id is None:
user_id = 'user-%s' % (transaction_id,)
return {
'request_id': request_id,
'transaction_id': transaction_id,
'user_id': user_id,
}
def sorted_dicts(dicts):
return sorted(dicts, key=lambda d: sorted(d.items()))
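# Usage sketch (illustrative; the pool object, flavours and ids are assumptions):
# populate_pool() imports one code per (flavour, suffix) pair into a pool that
# exposes import_unique_codes(), e.g. flavours=['sweet'], suffixes=['01', '02']
# yields the codes 'sweet01' and 'sweet02'. mk_audit_params() only derives the
# missing identifiers:
# >>> mk_audit_params('req-1')
# {'request_id': 'req-1', 'transaction_id': 'tx-req-1', 'user_id': 'user-tx-req-1'}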
|
MozillaSecurity/FuzzManager
|
refs/heads/master
|
TaskStatusReporter/__main__.py
|
1
|
# encoding: utf-8
'''
TaskStatusReporter -- Simple Task status reporting tool for TaskManager
Provide process and class level interfaces to send simple textual
status reports to TaskManager.
@author: Jesse Schwartzentruber (:truber)
@license:
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
@contact: jschwartzentruber@mozilla.com
'''
import sys
from .TaskStatusReporter import main
sys.exit(main())
|
samuelhavron/heroku-buildpack-python
|
refs/heads/master
|
Python-3.4.3/Lib/test/test_threadedtempfile.py
|
171
|
"""
Create and delete FILES_PER_THREAD temp files (via tempfile.TemporaryFile)
in each of NUM_THREADS threads, recording the number of successes and
failures. A failure is a bug in tempfile, and may be due to:
+ Trying to create more than one tempfile with the same name.
+ Trying to delete a tempfile that doesn't still exist.
+ Something we've never seen before.
By default, NUM_THREADS == 20 and FILES_PER_THREAD == 50. This is enough to
create about 150 failures per run under Win98SE in 2.0, and runs pretty
quickly. Guido reports needing to boost FILES_PER_THREAD to 500 before
provoking a 2.0 failure under Linux.
"""
NUM_THREADS = 20
FILES_PER_THREAD = 50
import tempfile
from test.support import threading_setup, threading_cleanup, run_unittest, import_module
threading = import_module('threading')
import unittest
import io
from traceback import print_exc
startEvent = threading.Event()
class TempFileGreedy(threading.Thread):
error_count = 0
ok_count = 0
def run(self):
self.errors = io.StringIO()
startEvent.wait()
for i in range(FILES_PER_THREAD):
try:
f = tempfile.TemporaryFile("w+b")
f.close()
except:
self.error_count += 1
print_exc(file=self.errors)
else:
self.ok_count += 1
class ThreadedTempFileTest(unittest.TestCase):
def test_main(self):
threads = []
thread_info = threading_setup()
for i in range(NUM_THREADS):
t = TempFileGreedy()
threads.append(t)
t.start()
startEvent.set()
ok = 0
errors = []
for t in threads:
t.join()
ok += t.ok_count
if t.error_count:
errors.append(str(t.name) + str(t.errors.getvalue()))
threading_cleanup(*thread_info)
msg = "Errors: errors %d ok %d\n%s" % (len(errors), ok,
'\n'.join(errors))
self.assertEqual(errors, [], msg)
self.assertEqual(ok, NUM_THREADS * FILES_PER_THREAD)
def test_main():
run_unittest(ThreadedTempFileTest)
if __name__ == "__main__":
test_main()
|
makinacorpus/django
|
refs/heads/master
|
django/contrib/gis/db/backends/oracle/compiler.py
|
120
|
from django.contrib.gis.db.models.sql.compiler import GeoSQLCompiler as BaseGeoSQLCompiler
from django.db.backends.oracle import compiler
SQLCompiler = compiler.SQLCompiler
class GeoSQLCompiler(BaseGeoSQLCompiler, SQLCompiler):
pass
class SQLInsertCompiler(compiler.SQLInsertCompiler, GeoSQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, GeoSQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, GeoSQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, GeoSQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, GeoSQLCompiler):
pass
class SQLDateTimeCompiler(compiler.SQLDateTimeCompiler, GeoSQLCompiler):
pass
|
CraigRhodes/fs_site
|
refs/heads/master
|
freesources/test_freesources/test_views.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from freesources import views
import unittest
# test for item addition
class TestCase(TestCase):
fixtures = ['test_data.json']
def setUp(self):
self.user = User.objects.create_user('testace',
'ace@gmail.com', 'ace1234')
# self.post = Post.objects.create(title="Test Post #1",
# body="Test Post #1 Body",
# author=self.user,
# date=datetime.datetime.now())
self.c = Client()
def test_post_creation(self):
"""
Tests that we can create a Post
"""
self.assertEqual(self.post.title, "Test Post #1")
self.assertEqual(self.post.author, self.user)
# test for feedback addition
def test_i_feedback_this(self):
"""
Tests a new user marking the story as read.
"""
self.c.login(username='newsposter', password='newspass')
response = self.c.post('/freesources/feedback/1/', {'add':True})
self.assertEqual(response.status_code, 200)
self.assertEquals(response.content, '{\n "read": true\n}')
#test for user sign up
def test_user(self):
pass
|
migueldiascosta/easybuild-framework
|
refs/heads/master
|
easybuild/toolchains/linalg/blacs.py
|
3
|
##
# Copyright 2012-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Support for BLACS as toolchain linear algebra library.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
from easybuild.tools.toolchain.linalg import LinAlg
class Blacs(LinAlg):
"""
Trivial class, provides BLACS support.
"""
BLACS_MODULE_NAME = ['BLACS']
BLACS_LIB = ["blacsCinit", "blacsF77init", "blacs"]
BLACS_LIB_GROUP = True
def _set_blacs_variables(self):
"""Skip setting BLACS variables if it is not required (e.g., with recent ScaLAPACK versions)."""
if self.is_required(self.BLACS_MODULE_NAME[0]):
super(Blacs, self)._set_blacs_variables()
|
eayunstack/nova
|
refs/heads/develop
|
nova/compute/monitors/cpu_monitor.py
|
63
|
# Copyright 2013 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CPU monitor to retrieve CPU information
"""
from nova.compute import monitors
class _CPUMonitorBase(monitors.ResourceMonitorBase):
"""CPU monitor base."""
def _get_cpu_frequency(self, **kwargs):
"""Return CPU current frequency and its timestamp."""
return None, None
def _get_cpu_user_time(self, **kwargs):
"""Return CPU user mode time and its timestamp."""
return None, None
def _get_cpu_kernel_time(self, **kwargs):
"""Return CPU kernel time and its timestamp."""
return None, None
def _get_cpu_idle_time(self, **kwargs):
"""Return CPU idle time and its timestamp."""
return None, None
def _get_cpu_iowait_time(self, **kwargs):
"""Return CPU I/O wait time and its timestamp."""
return None, None
def _get_cpu_user_percent(self, **kwargs):
"""Return CPU user mode percentage and its timestamp."""
return None, None
def _get_cpu_kernel_percent(self, **kwargs):
"""Return CPU kernel percentage and its timestamp."""
return None, None
def _get_cpu_idle_percent(self, **kwargs):
"""Return CPU idle percentage and its timestamp."""
return None, None
def _get_cpu_iowait_percent(self, **kwargs):
"""Return CPU I/O wait percentage and its timestamp."""
return None, None
def _get_cpu_percent(self, **kwargs):
"""Return generic CPU utilization and its timestamp."""
return None, None
|
infinity0/l33tutils
|
refs/heads/master
|
data/read/solyml.py
|
1
|
#!/usr/bin/python -uO
"""Converts between SOL (Adobe Flash savedata) and YAML."""
import pyamf.sol, yaml#, syck
import sys, os, os.path, shutil, traceback
pyamf.sol.decode_ = pyamf.sol.decode
def decode_unstrict(str):
return pyamf.sol.decode_(str, False)
pyamf.sol.decode = decode_unstrict
class FileRW(object):
def __init__(self, ifn, isuf, osuf):
if not ifn.endswith(isuf):
raise ValueError("suffix %s does not match: %s" % (isuf, ifn))
self.ifn = ifn
self.ofn = ifn[:-len(isuf)] + osuf
self.idata = None
self.odata = None
def __call__(self):
print "reading %s..." % self.ifn,
self._load()
print "done"
self._convert()
print "writing %s..." % self.ofn,
self._save()
print "done"
def _convert(self):
self.odata = self.idata
def _load(self):
raise NotImplementedError
def _save(self):
raise NotImplementedError
class Sol2YmlRW(FileRW):
def _load(self):
self.idata = pyamf.sol.load(self.ifn)
def _save(self):
with open(self.ofn, 'w') as fp:
yaml.dump(self.odata, fp, yaml.CDumper)
#syck.dump(self.idata)
class Yml2SolRW(FileRW):
def _load(self):
with open(self.ifn) as fp:
self.idata = yaml.load(fp, yaml.CLoader)
def _save(self):
pyamf.sol.save(self.odata, self.ofn)
class Sol2SolRW(FileRW):
def _load(self):
self.idata = pyamf.sol.load(self.ifn)
def _convert(self):
FileRW._convert(self)
fn = self.ifn + ".bak"
print "backing up %s to %s..." % (self.ifn, fn),
shutil.copy(self.ifn, fn)
print "done"
def _save(self):
pyamf.sol.save(self.odata, self.ofn)
def sol2yml(fn):
return Sol2YmlRW(fn, '.sol', '.yml')()
def yml2sol(fn):
return Yml2SolRW(fn, '.yml', '.sol')()
def sol2sol(fn):
return Sol2SolRW(fn, '.sol', '.sol')()
SUBCOMMANDS = ("sol2yml", "yml2sol", "sol2sol")
def run_all(cmd, *files):
for fn in files:
try:
globals()[cmd](fn)
except Exception, e:
print "skip fn:"
traceback.print_exc()
def main(argv):
cmd = os.path.basename(argv[0])
if cmd in SUBCOMMANDS:
return run_all(cmd, *argv[1:])
cmd = os.path.basename(argv[1])
if cmd in SUBCOMMANDS:
return run_all(cmd, *argv[2:])
help()
def help():
print 'Usage: solyml.py {yml2sol|sol2yml|sol2sol} <FILE> ...'
print 'Convert a yml or sol [file] to the other format, or attempt to re-write a corrupted sol file.'
if __name__ == '__main__':
sys.exit(main(sys.argv))
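# Usage sketch (illustrative file names; requires pyamf and PyYAML):
#   python solyml.py sol2yml save.sol    # writes save.yml
#   python solyml.py yml2sol save.yml    # writes save.sol
#   python solyml.py sol2sol save.sol    # rewrites save.sol, keeping save.sol.bak
# The subcommand may also be supplied via the script name (e.g. a sol2yml symlink).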
|
alsrgv/tensorflow
|
refs/heads/master
|
tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_ops_test.py
|
8
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.beam_search_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import itertools
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
def _transpose_batch_time(x):
return np.transpose(x, [1, 0, 2]).astype(np.int32)
class GatherTreeTest(test.TestCase):
def testGatherTreeOne(self):
# (max_time = 4, batch_size = 1, beams = 3)
end_token = 10
step_ids = _transpose_batch_time(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [-1, -1, -1]]])
parent_ids = _transpose_batch_time(
[[[0, 0, 0], [0, 1, 1], [2, 1, 2], [-1, -1, -1]]])
max_sequence_lengths = [3]
expected_result = _transpose_batch_time([[[2, 2, 2], [6, 5, 6], [7, 8, 9],
[10, 10, 10]]])
beams = beam_search_ops.gather_tree(
step_ids=step_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
with self.cached_session(use_gpu=True):
self.assertAllEqual(expected_result, self.evaluate(beams))
def testBadParentValuesOnCPU(self):
# (batch_size = 1, max_time = 4, beams = 3)
# bad parent in beam 1 time 1
end_token = 10
step_ids = _transpose_batch_time(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [-1, -1, -1]]])
parent_ids = _transpose_batch_time(
[[[0, 0, 0], [0, -1, 1], [2, 1, 2], [-1, -1, -1]]])
max_sequence_lengths = [3]
with ops.device("/cpu:0"):
with self.assertRaisesOpError(
r"parent id -1 at \(batch, time, beam\) == \(0, 0, 1\)"):
beams = beam_search_ops.gather_tree(
step_ids=step_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
self.evaluate(beams)
def testBadParentValuesOnGPU(self):
# Only want to run this test on CUDA devices, as gather_tree is not
# registered for SYCL devices.
if not test.is_gpu_available(cuda_only=True):
return
# (max_time = 4, batch_size = 1, beams = 3)
# bad parent in beam 1 time 1; appears as a negative index at time 0
end_token = 10
step_ids = _transpose_batch_time(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [-1, -1, -1]]])
parent_ids = _transpose_batch_time(
[[[0, 0, 0], [0, -1, 1], [2, 1, 2], [-1, -1, -1]]])
max_sequence_lengths = [3]
expected_result = _transpose_batch_time([[[2, -1, 2], [6, 5, 6], [7, 8, 9],
[10, 10, 10]]])
with ops.device("/device:GPU:0"):
beams = beam_search_ops.gather_tree(
step_ids=step_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
self.assertAllEqual(expected_result, self.evaluate(beams))
def testGatherTreeBatch(self):
batch_size = 10
beam_width = 15
max_time = 8
max_sequence_lengths = [0, 1, 2, 4, 7, 8, 9, 10, 11, 0]
end_token = 5
with self.cached_session(use_gpu=True):
step_ids = np.random.randint(
0, high=end_token + 1, size=(max_time, batch_size, beam_width))
parent_ids = np.random.randint(
0, high=beam_width - 1, size=(max_time, batch_size, beam_width))
beams = beam_search_ops.gather_tree(
step_ids=step_ids.astype(np.int32),
parent_ids=parent_ids.astype(np.int32),
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
self.assertEqual((max_time, batch_size, beam_width), beams.shape)
beams_value = self.evaluate(beams)
for b in range(batch_size):
# Past max_sequence_lengths[b], we emit all end tokens.
b_value = beams_value[max_sequence_lengths[b]:, b, :]
self.assertAllClose(b_value, end_token * np.ones_like(b_value))
for batch, beam in itertools.product(
range(batch_size), range(beam_width)):
v = np.squeeze(beams_value[:, batch, beam])
if end_token in v:
found_bad = np.where(v == -1)[0]
self.assertEqual(0, len(found_bad))
found = np.where(v == end_token)[0]
found = found[0] # First occurrence of end_token.
# If an end_token is found, everything before it should be a
# valid id and everything after it should be -1.
if found > 0:
self.assertAllEqual(
v[:found - 1] >= 0, np.ones_like(v[:found - 1], dtype=bool))
self.assertAllClose(v[found + 1:],
end_token * np.ones_like(v[found + 1:]))
if __name__ == "__main__":
test.main()
|
michalkurka/h2o-3
|
refs/heads/master
|
h2o-py/tests/testdir_misc/pyunit_frame_as_list.py
|
8
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def frame_as_list():
iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
prostate = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate.csv.zip"))
airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/allyears2k.zip"))
res1 = h2o.as_list(iris, use_pandas=False)
res1 = list(zip(*res1))
assert abs(float(res1[0][9]) - 4.4) < 1e-10 and abs(float(res1[1][9]) - 2.9) < 1e-10 and \
abs(float(res1[2][9]) - 1.4) < 1e-10, "incorrect values"
res2 = h2o.as_list(prostate, use_pandas=False)
res2 = list(zip(*res2))
assert abs(float(res2[0][7]) - 7) < 1e-10 and abs(float(res2[1][7]) - 0) < 1e-10 and \
abs(float(res2[2][7]) - 68) < 1e-10, "incorrect values"
res3 = h2o.as_list(airlines, use_pandas=False)
res3 = list(zip(*res3))
assert abs(float(res3[0][4]) - 1987) < 1e-10 and abs(float(res3[1][4]) - 10) < 1e-10 and \
abs(float(res3[2][4]) - 18) < 1e-10, "incorrect values"
if __name__ == "__main__":
pyunit_utils.standalone_test(frame_as_list)
else:
frame_as_list()
|
fvpolpeta/devide
|
refs/heads/master
|
modules/vtk_basic/vtkExtractTemporalFieldData.py
|
7
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkExtractTemporalFieldData(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkExtractTemporalFieldData(), 'Processing.',
('vtkDataSet',), ('vtkRectilinearGrid',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
psychok7/django-project-template-yadpt
|
refs/heads/master
|
setup.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# https://packaging.python.org/distributing/#packaging-your-project
# https://packaging.python.org/distributing/#uploading-your-project-to-pypi
# https://docs.djangoproject.com/en/1.11/intro/reusable-apps/
# http://peterdowns.com/posts/first-time-with-pypi.html
VERSION = '1.7'
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='django-yadpt-starter',
packages=find_packages(),
include_package_data=True,
version=VERSION,
description=(
'django-yadpt-starter is Yet Another Django Project Template '
'skeleton for Django projects'
),
long_description=long_description,
author='Nuno Khan',
author_email='nunok7@gmail.com',
url='https://github.com/psychok7/django-yadpt-starter',
download_url=(
'https://github.com/psychok7/django-yadpt-starter/tarball/v' + VERSION
),
keywords=[
'django', 'template', 'project templates', 'python', 'https',
'letsencrypt', 'starter', 'cookiecutter', 'docker', 'docker-compose'
],
scripts=['minimal/django-yadpt-starter.py'],
install_requires=['Django >= 1.8', 'six >= 1.10.0'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
license='MIT',
)
|
geraldoandradee/pytest
|
refs/heads/master
|
testing/test_assertinterpret.py
|
4
|
"PYTEST_DONT_REWRITE"
import pytest, py
from _pytest.assertion import util
def exvalue():
return py.std.sys.exc_info()[1]
def f():
return 2
def test_not_being_rewritten():
assert "@py_builtins" not in globals()
def test_assert():
try:
assert f() == 3
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_with_explicit_message():
try:
assert f() == 3, "hello"
except AssertionError:
e = exvalue()
assert e.msg == 'hello'
def test_assert_within_finally():
excinfo = py.test.raises(ZeroDivisionError, """
try:
1/0
finally:
i = 42
""")
s = excinfo.exconly()
assert py.std.re.search("division.+by zero", s) is not None
#def g():
# A.f()
#excinfo = getexcinfo(TypeError, g)
#msg = getmsg(excinfo)
#assert msg.find("must be called with A") != -1
def test_assert_multiline_1():
try:
assert (f() ==
3)
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_multiline_2():
try:
assert (f() == (4,
3)[-1])
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 ==')
def test_in():
try:
assert "hi" in [1, 2]
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 'hi' in")
def test_is():
try:
assert 1 is 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 is 2")
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_attrib():
class Foo(object):
b = 1
i = Foo()
try:
assert i.b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_attrib_inst():
class Foo(object):
b = 1
try:
assert Foo().b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
def test_len():
l = list(range(42))
try:
assert len(l) == 100
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 42 == 100")
assert "where 42 = len([" in s
def test_assert_non_string_message():
class A:
def __str__(self):
return "hello"
try:
assert 0 == 1, A()
except AssertionError:
e = exvalue()
assert e.msg == "hello"
def test_assert_keyword_arg():
def f(x=3):
return False
try:
assert f(x=5)
except AssertionError:
e = exvalue()
assert "x=5" in e.msg
# These tests should both fail, but should fail nicely...
class WeirdRepr:
def __repr__(self):
return '<WeirdRepr\nsecond line>'
def bug_test_assert_repr():
v = WeirdRepr()
try:
assert v == 1
except AssertionError:
e = exvalue()
assert e.msg.find('WeirdRepr') != -1
assert e.msg.find('second line') != -1
assert 0
def test_assert_non_string():
try:
assert 0, ['list']
except AssertionError:
e = exvalue()
assert e.msg.find("list") != -1
def test_assert_implicit_multiline():
try:
x = [1,2,3]
assert x != [1,
2, 3]
except AssertionError:
e = exvalue()
assert e.msg.find('assert [1, 2, 3] !=') != -1
def test_assert_with_brokenrepr_arg():
class BrokenRepr:
def __repr__(self): 0 / 0
e = AssertionError(BrokenRepr())
if e.msg.find("broken __repr__") == -1:
py.test.fail("broken __repr__ not handle correctly")
def test_multiple_statements_per_line():
try:
a = 1; assert a == 2
except AssertionError:
e = exvalue()
assert "assert 1 == 2" in e.msg
def test_power():
try:
assert 2**3 == 7
except AssertionError:
e = exvalue()
assert "assert (2 ** 3) == 7" in e.msg
class TestView:
def setup_class(cls):
cls.View = pytest.importorskip("_pytest.assertion.oldinterpret").View
def test_class_dispatch(self):
### Use a custom class hierarchy with existing instances
class Picklable(self.View):
pass
class Simple(Picklable):
__view__ = object
def pickle(self):
return repr(self.__obj__)
class Seq(Picklable):
__view__ = list, tuple, dict
def pickle(self):
return ';'.join(
[Picklable(item).pickle() for item in self.__obj__])
class Dict(Seq):
__view__ = dict
def pickle(self):
return Seq.pickle(self) + '!' + Seq(self.values()).pickle()
assert Picklable(123).pickle() == '123'
assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4'
assert Picklable({1:2}).pickle() == '1!2'
def test_viewtype_class_hierarchy(self):
# Use a custom class hierarchy based on attributes of existing instances
class Operation:
"Existing class that I don't want to change."
def __init__(self, opname, *args):
self.opname = opname
self.args = args
existing = [Operation('+', 4, 5),
Operation('getitem', '', 'join'),
Operation('setattr', 'x', 'y', 3),
Operation('-', 12, 1)]
class PyOp(self.View):
def __viewkey__(self):
return self.opname
def generate(self):
return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args)))
class PyBinaryOp(PyOp):
__view__ = ('+', '-', '*', '/')
def generate(self):
return '%s %s %s' % (self.args[0], self.opname, self.args[1])
codelines = [PyOp(op).generate() for op in existing]
assert codelines == ["4 + 5", "getitem('', 'join')",
"setattr('x', 'y', 3)", "12 - 1"]
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_assert_customizable_reprcompare(monkeypatch):
monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
try:
assert 3 == 4
except AssertionError:
e = exvalue()
s = str(e)
assert "hello" in s
def test_assert_long_source_1():
try:
assert len == [
(None, ['somet text', 'more text']),
]
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_long_source_2():
try:
assert(len == [
(None, ['somet text', 'more text']),
])
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_raise_alias(testdir):
testdir.makepyfile("""
"PYTEST_DONT_REWRITE"
import sys
EX = AssertionError
def test_hello():
raise EX("hello"
"multi"
"line")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello*",
"*raise EX*",
"*1 failed*",
])
@pytest.mark.skipif("sys.version_info < (2,5)")
def test_assert_raise_subclass():
class SomeEx(AssertionError):
def __init__(self, *args):
super(SomeEx, self).__init__()
try:
raise SomeEx("hello")
except AssertionError:
s = str(exvalue())
assert 're-run' not in s
assert 'could not determine' in s
def test_assert_raises_in_nonzero_of_object_pytest_issue10():
class A(object):
def __nonzero__(self):
raise ValueError(42)
def __lt__(self, other):
return A()
def __repr__(self):
return "<MY42 object>"
def myany(x):
return True
try:
assert not(myany(A() < 0))
except AssertionError:
e = exvalue()
s = str(e)
assert "<MY42 object> < 0" in s
@py.test.mark.skipif("sys.version_info >= (2,6)")
def test_oldinterpret_importation():
# we had a cyclic import there
# requires pytest on sys.path
res = py.std.subprocess.call([
py.std.sys.executable, '-c', str(py.code.Source("""
try:
from _pytest.assertion.newinterpret import interpret
except ImportError:
from _pytest.assertion.oldinterpret import interpret
"""))
])
assert res == 0
|
zzzeek/test
|
refs/heads/master
|
test/test_inheritance.py
|
4
|
from mako.template import Template
from mako import lookup, util
import unittest
from util import flatten_result, result_lines
class InheritanceTest(unittest.TestCase):
def test_basic(self):
collection = lookup.TemplateLookup()
collection.put_string('main', """
<%inherit file="base"/>
<%def name="header()">
main header.
</%def>
this is the content.
""")
collection.put_string('base', """
This is base.
header: ${self.header()}
body: ${self.body()}
footer: ${self.footer()}
<%def name="footer()">
this is the footer. header again ${next.header()}
</%def>
""")
assert result_lines(collection.get_template('main').render()) == [
'This is base.',
'header:',
'main header.',
'body:',
'this is the content.',
'footer:',
'this is the footer. header again',
'main header.'
]
def test_multilevel_nesting(self):
collection = lookup.TemplateLookup()
collection.put_string('main', """
<%inherit file="layout"/>
<%def name="d()">main_d</%def>
main_body ${parent.d()}
full stack from the top:
${self.name} ${parent.name} ${parent.context['parent'].name} ${parent.context['parent'].context['parent'].name}
""")
collection.put_string('layout', """
<%inherit file="general"/>
<%def name="d()">layout_d</%def>
layout_body
parent name: ${parent.name}
${parent.d()}
${parent.context['parent'].d()}
${next.body()}
""")
collection.put_string('general', """
<%inherit file="base"/>
<%def name="d()">general_d</%def>
general_body
${next.d()}
${next.context['next'].d()}
${next.body()}
""")
collection.put_string('base', """
base_body
full stack from the base:
${self.name} ${self.context['parent'].name} ${self.context['parent'].context['parent'].name} ${self.context['parent'].context['parent'].context['parent'].name}
${next.body()}
<%def name="d()">base_d</%def>
""")
assert result_lines(collection.get_template('main').render()) == [
'base_body',
'full stack from the base:',
'self:main self:layout self:general self:base',
'general_body',
'layout_d',
'main_d',
'layout_body',
'parent name: self:general',
'general_d',
'base_d',
'main_body layout_d',
'full stack from the top:',
'self:main self:layout self:general self:base'
]
def test_includes(self):
"""test that an included template also has its full hierarchy invoked."""
collection = lookup.TemplateLookup()
collection.put_string("base", """
<%def name="a()">base_a</%def>
This is the base.
${next.body()}
End base.
""")
collection.put_string("index","""
<%inherit file="base"/>
this is index.
a is: ${self.a()}
<%include file="secondary"/>
""")
collection.put_string("secondary","""
<%inherit file="base"/>
this is secondary.
a is: ${self.a()}
""")
assert result_lines(collection.get_template("index").render()) == [
'This is the base.',
'this is index.',
'a is: base_a',
'This is the base.',
'this is secondary.',
'a is: base_a',
'End base.',
'End base.'
]
def test_namespaces(self):
"""test that templates used via <%namespace> have access to an inheriting 'self', and that
the full 'self' is also exported."""
collection = lookup.TemplateLookup()
collection.put_string("base", """
<%def name="a()">base_a</%def>
<%def name="b()">base_b</%def>
This is the base.
${next.body()}
""")
collection.put_string("layout", """
<%inherit file="base"/>
<%def name="a()">layout_a</%def>
This is the layout..
${next.body()}
""")
collection.put_string("index","""
<%inherit file="base"/>
<%namespace name="sc" file="secondary"/>
this is index.
a is: ${self.a()}
sc.a is: ${sc.a()}
sc.b is: ${sc.b()}
sc.c is: ${sc.c()}
sc.body is: ${sc.body()}
""")
collection.put_string("secondary","""
<%inherit file="layout"/>
<%def name="c()">secondary_c. a is ${self.a()} b is ${self.b()} d is ${self.d()}</%def>
<%def name="d()">secondary_d.</%def>
this is secondary.
a is: ${self.a()}
c is: ${self.c()}
""")
assert result_lines(collection.get_template('index').render()) == ['This is the base.',
'this is index.',
'a is: base_a',
'sc.a is: layout_a',
'sc.b is: base_b',
'sc.c is: secondary_c. a is layout_a b is base_b d is secondary_d.',
'sc.body is:',
'this is secondary.',
'a is: layout_a',
'c is: secondary_c. a is layout_a b is base_b d is secondary_d.'
]
def test_pageargs(self):
collection = lookup.TemplateLookup()
collection.put_string("base", """
this is the base.
<%
sorted_ = pageargs.items()
sorted_ = sorted(sorted_)
%>
pageargs: (type: ${type(pageargs)}) ${sorted_}
<%def name="foo()">
${next.body(**context.kwargs)}
</%def>
${foo()}
""")
collection.put_string("index", """
<%inherit file="base"/>
<%page args="x, y, z=7"/>
print ${x}, ${y}, ${z}
""")
if util.py3k:
assert result_lines(collection.get_template('index').render_unicode(x=5,y=10)) == [
"this is the base.",
"pageargs: (type: <class 'dict'>) [('x', 5), ('y', 10)]",
"print 5, 10, 7"
]
else:
assert result_lines(collection.get_template('index').render_unicode(x=5,y=10)) == [
"this is the base.",
"pageargs: (type: <type 'dict'>) [('x', 5), ('y', 10)]",
"print 5, 10, 7"
]
def test_pageargs_2(self):
collection = lookup.TemplateLookup()
collection.put_string("base", """
this is the base.
${next.body(**context.kwargs)}
<%def name="foo(**kwargs)">
${next.body(**kwargs)}
</%def>
<%def name="bar(**otherargs)">
${next.body(z=16, **context.kwargs)}
</%def>
${foo(x=12, y=15, z=8)}
${bar(x=19, y=17)}
""")
collection.put_string("index", """
<%inherit file="base"/>
<%page args="x, y, z=7"/>
pageargs: ${x}, ${y}, ${z}
""")
assert result_lines(collection.get_template('index').render(x=5,y=10)) == [
"this is the base.",
"pageargs: 5, 10, 7",
"pageargs: 12, 15, 8",
"pageargs: 5, 10, 16"
]
def test_pageargs_err(self):
collection = lookup.TemplateLookup()
collection.put_string("base", """
this is the base.
${next.body()}
""")
collection.put_string("index", """
<%inherit file="base"/>
<%page args="x, y, z=7"/>
print ${x}, ${y}, ${z}
""")
try:
print collection.get_template('index').render(x=5,y=10)
assert False
except TypeError:
assert True
def test_toplevel(self):
collection = lookup.TemplateLookup()
collection.put_string("base", """
this is the base.
${next.body()}
""")
collection.put_string("index", """
<%inherit file="base"/>
this is the body
""")
assert result_lines(collection.get_template('index').render()) == [
"this is the base.",
"this is the body"
]
assert result_lines(collection.get_template('index').get_def("body").render()) == [
"this is the body"
]
def test_dynamic(self):
collection = lookup.TemplateLookup()
collection.put_string("base", """
this is the base.
${next.body()}
""")
collection.put_string("index", """
<%!
def dyn(context):
if context.get('base', None) is not None:
return 'base'
else:
return None
%>
<%inherit file="${dyn(context)}"/>
this is index.
""")
assert result_lines(collection.get_template('index').render()) == [
'this is index.'
]
assert result_lines(collection.get_template('index').render(base=True)) == [
'this is the base.',
'this is index.'
]
def test_in_call(self):
collection = lookup.TemplateLookup()
collection.put_string("/layout.html","""
Super layout!
<%call expr="self.grid()">
${next.body()}
</%call>
Oh yea!
<%def name="grid()">
Parent grid
${caller.body()}
End Parent
</%def>
""")
collection.put_string("/subdir/layout.html", """
${next.body()}
<%def name="grid()">
Subdir grid
${caller.body()}
End subdir
</%def>
<%inherit file="/layout.html"/>
""")
collection.put_string("/subdir/renderedtemplate.html","""
Holy smokes!
<%inherit file="/subdir/layout.html"/>
""")
#print collection.get_template("/layout.html").code
#print collection.get_template("/subdir/renderedtemplate.html").render()
assert result_lines(collection.get_template("/subdir/renderedtemplate.html").render()) == [
"Super layout!",
"Subdir grid",
"Holy smokes!",
"End subdir",
"Oh yea!"
]
|
hiptools/hiptools
|
refs/heads/master
|
write_utf.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""модуль для записи информации в файл. Два первых аргумента - обязательны.
Аргументы: 1 имя файла (куда писать), 2 список со строками, 3 параметр записи
4 кодировка """
class write_gen:
def write_file(self, f_name, data, mode='w', enc='utf8'):
utf = open(f_name, mode)
for x in data:
utf.write(x.encode(enc))
print 'writing ', f_name
utf.close()
def write_line(self, f_name, data, mode='w', enc='utf8'):
utf = open(f_name, mode)
# utf.write(data[0].encode(enc))
utf.write(data.encode(enc))
print 'writing ', f_name
utf.close()
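# Usage sketch (illustrative file name and strings):
# >>> w = write_gen()
# >>> w.write_file('out.txt', [u'line one\n', u'line two\n'])    # writes a list of strings
# >>> w.write_line('out.txt', u'one more line\n', mode='a')      # appends a single string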
|
normtown/SickRage
|
refs/heads/master
|
autoProcessTV/lib/requests/packages/chardet/chardetect.py
|
1785
|
#!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
|
imd8594/GroupMeReddit
|
refs/heads/master
|
groupmebot/user.py
|
1
|
"""
Ian Dansereau
GroupMeReddit
user.py
7/30/16
"""
class User(object):
def __init__(self, user_id, role, nickname):
self._user_id = user_id
self._role = role
self._nickname = nickname
def getId(self):
return self._user_id
def getRole(self):
return self._role
def getNickname(self):
return self._nickname
def setRole(self, role):
self._role = role
def isAdmin(self):
if self._role == "admin":
return True
return False
def isMod(self):
if self._role == "moderator":
return True
return False
def isUser(self):
if self._role == "user":
return True
return False
def isBanned(self):
if self._role == "banned":
return True
return False
|
salberin/libsigrokdecode
|
refs/heads/master
|
decoders/ds1307/pd.py
|
2
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Uwe Hermann <uwe@hermann-uwe.de>
## Copyright (C) 2013 Matt Ranostay <mranostay@gmail.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
days_of_week = [
'Sunday',
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday',
]
# Return the specified BCD number (max. 8 bits) as integer.
def bcd2int(b):
return (b & 0x0f) + ((b >> 4) * 10)
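# Example: bcd2int(0x59) == 59 -- the low nibble is the ones digit (9) and the
# high nibble the tens digit (5), matching how the DS1307 stores its registers.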
class Decoder(srd.Decoder):
api_version = 2
id = 'ds1307'
name = 'DS1307'
longname = 'Dallas DS1307'
desc = 'Realtime clock module protocol.'
license = 'gplv2+'
inputs = ['i2c']
outputs = ['ds1307']
annotations = (
('text', 'Human-readable text'),
)
def __init__(self, **kwargs):
self.state = 'IDLE'
self.hours = -1
self.minutes = -1
self.seconds = -1
self.days = -1
self.date = -1
self.months = -1
self.years = -1
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def putx(self, data):
self.put(self.ss, self.es, self.out_ann, data)
def handle_reg_0x00(self, b): # Seconds
self.seconds = bcd2int(b & 0x7f)
self.putx([0, ['Seconds: %d' % self.seconds]])
def handle_reg_0x01(self, b): # Minutes
self.minutes = bcd2int(b & 0x7f)
self.putx([0, ['Minutes: %d' % self.minutes]])
def handle_reg_0x02(self, b): # Hours
self.hours = bcd2int(b & 0x3f)
self.putx([0, ['Hours: %d' % self.hours]])
def handle_reg_0x03(self, b): # Day of week
self.days = bcd2int(b & 0x7)
self.putx([0, ['Day of Week: %s' % days_of_week[self.days - 1]]])
def handle_reg_0x04(self, b): # Date
self.date = bcd2int(b & 0x3f)
self.putx([0, ['Days: %d' % self.date]])
def handle_reg_0x05(self, b): # Month
self.months = bcd2int(b & 0x1f)
self.putx([0, ['Months: %d' % self.months]])
def handle_reg_0x06(self, b): # Year
self.years = bcd2int(b & 0xff) + 2000
self.putx([0, ['Years: %d' % self.years]])
def handle_reg_0x07(self, b): # Control Register
pass
def decode(self, ss, es, data):
cmd, databyte = data
# Store the start/end samples of this I²C packet.
self.ss, self.es = ss, es
# State machine.
if self.state == 'IDLE':
# Wait for an I²C START condition.
if cmd != 'START':
return
self.state = 'GET SLAVE ADDR'
self.block_start_sample = ss
elif self.state == 'GET SLAVE ADDR':
# Wait for an address write operation.
# TODO: We should only handle packets to the RTC slave (0x68).
if cmd != 'ADDRESS WRITE':
return
self.state = 'GET REG ADDR'
elif self.state == 'GET REG ADDR':
# Wait for a data write (master selects the slave register).
if cmd != 'DATA WRITE':
return
self.reg = databyte
self.state = 'WRITE RTC REGS'
elif self.state == 'WRITE RTC REGS':
# If we see a Repeated Start here, it's probably an RTC read.
if cmd == 'START REPEAT':
self.state = 'READ RTC REGS'
return
# Otherwise: Get data bytes until a STOP condition occurs.
if cmd == 'DATA WRITE':
handle_reg = getattr(self, 'handle_reg_0x%02x' % self.reg)
handle_reg(databyte)
self.reg += 1
# TODO: Check for NACK!
elif cmd == 'STOP':
# TODO: Handle read/write of only parts of these items.
d = '%s, %02d.%02d.%02d %02d:%02d:%02d' % (
days_of_week[self.days - 1], self.date, self.months,
self.years, self.hours, self.minutes, self.seconds)
self.put(self.block_start_sample, es, self.out_ann,
[0, ['Written date/time: %s' % d]])
self.state = 'IDLE'
else:
pass # TODO
elif self.state == 'READ RTC REGS':
# Wait for an address read operation.
# TODO: We should only handle packets to the RTC slave (0x68).
if cmd == 'ADDRESS READ':
self.state = 'READ RTC REGS2'
return
else:
pass # TODO
elif self.state == 'READ RTC REGS2':
if cmd == 'DATA READ':
handle_reg = getattr(self, 'handle_reg_0x%02x' % self.reg)
handle_reg(databyte)
self.reg += 1
# TODO: Check for NACK!
elif cmd == 'STOP':
d = '%s, %02d.%02d.%02d %02d:%02d:%02d' % (
days_of_week[self.days - 1], self.date, self.months,
self.years, self.hours, self.minutes, self.seconds)
self.put(self.block_start_sample, es, self.out_ann,
[0, ['Read date/time: %s' % d]])
self.state = 'IDLE'
else:
pass # TODO?
else:
raise Exception('Invalid state: %s' % self.state)
|
daavery/audacity
|
refs/heads/master
|
lib-src/lv2/lv2/plugins/eg-sampler.lv2/waflib/Tools/dbus.py
|
318
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Task,Errors
from waflib.TaskGen import taskgen_method,before_method
@taskgen_method
def add_dbus_file(self,filename,prefix,mode):
if not hasattr(self,'dbus_lst'):
self.dbus_lst=[]
if not'process_dbus'in self.meths:
self.meths.append('process_dbus')
self.dbus_lst.append([filename,prefix,mode])
@before_method('apply_core')
def process_dbus(self):
for filename,prefix,mode in getattr(self,'dbus_lst',[]):
node=self.path.find_resource(filename)
if not node:
raise Errors.WafError('file not found '+filename)
tsk=self.create_task('dbus_binding_tool',node,node.change_ext('.h'))
tsk.env.DBUS_BINDING_TOOL_PREFIX=prefix
tsk.env.DBUS_BINDING_TOOL_MODE=mode
class dbus_binding_tool(Task.Task):
color='BLUE'
ext_out=['.h']
run_str='${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}'
shell=True
def configure(conf):
dbus_binding_tool=conf.find_program('dbus-binding-tool',var='DBUS_BINDING_TOOL')
|
eternalthinker/flask-server-rq-example
|
refs/heads/master
|
venv/lib/python2.7/site-packages/flask/signals.py
|
783
|
# -*- coding: utf-8 -*-
"""
flask.signals
~~~~~~~~~~~~~
Implements signals based on blinker if available, otherwise
falls silently back to a noop
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
signals_available = False
try:
from blinker import Namespace
signals_available = True
except ImportError:
class Namespace(object):
def signal(self, name, doc=None):
return _FakeSignal(name, doc)
class _FakeSignal(object):
"""If blinker is unavailable, create a fake class with the same
interface that allows sending of signals but will fail with an
error on anything else. Instead of doing anything on send, it
will just ignore the arguments and do nothing instead.
"""
def __init__(self, name, doc=None):
self.name = name
self.__doc__ = doc
def _fail(self, *args, **kwargs):
raise RuntimeError('signalling support is unavailable '
'because the blinker library is '
'not installed.')
send = lambda *a, **kw: None
connect = disconnect = has_receivers_for = receivers_for = \
temporarily_connected_to = connected_to = _fail
del _fail
# the namespace for code signals. If you are not flask code, do
# not put signals in here. Create your own namespace instead.
_signals = Namespace()
# core signals. For usage examples grep the sourcecode or consult
# the API documentation in docs/api.rst as well as docs/signals.rst
template_rendered = _signals.signal('template-rendered')
request_started = _signals.signal('request-started')
request_finished = _signals.signal('request-finished')
request_tearing_down = _signals.signal('request-tearing-down')
got_request_exception = _signals.signal('got-request-exception')
appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
appcontext_pushed = _signals.signal('appcontext-pushed')
appcontext_popped = _signals.signal('appcontext-popped')
message_flashed = _signals.signal('message-flashed')
|
mic4ael/indico
|
refs/heads/master
|
indico/modules/events/papers/controllers/paper.py
|
1
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import os
from collections import defaultdict
from itertools import chain
from operator import attrgetter
from flask import flash, request, session
from sqlalchemy.orm import selectinload
from werkzeug.exceptions import Forbidden
from werkzeug.utils import cached_property
from indico.modules.events.contributions import Contribution
from indico.modules.events.papers.controllers.base import RHJudgingAreaBase
from indico.modules.events.papers.forms import BulkPaperJudgmentForm
from indico.modules.events.papers.lists import PaperAssignmentListGenerator, PaperJudgingAreaListGeneratorDisplay
from indico.modules.events.papers.models.revisions import PaperRevisionState
from indico.modules.events.papers.operations import judge_paper, update_reviewing_roles
from indico.modules.events.papers.settings import PaperReviewingRole
from indico.modules.events.papers.views import WPDisplayJudgingArea, WPManagePapers
from indico.modules.events.util import ZipGeneratorMixin
from indico.util.fs import secure_filename
from indico.util.i18n import _, ngettext
from indico.web.flask.util import url_for
from indico.web.util import jsonify_data, jsonify_form, jsonify_template
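# Maps a PaperReviewingRole to a getter for the users holding that role:
# CFP_ROLE_MAP resolves the role on the event's call for papers (event-wide
# role holders), while CONTRIB_ROLE_MAP resolves it on a single contribution
# (users assigned to that contribution's paper).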
CFP_ROLE_MAP = {
PaperReviewingRole.judge: attrgetter('judges'),
PaperReviewingRole.content_reviewer: attrgetter('content_reviewers'),
PaperReviewingRole.layout_reviewer: attrgetter('layout_reviewers'),
}
CONTRIB_ROLE_MAP = {
PaperReviewingRole.judge: attrgetter('paper_judges'),
PaperReviewingRole.content_reviewer: attrgetter('paper_content_reviewers'),
PaperReviewingRole.layout_reviewer: attrgetter('paper_layout_reviewers'),
}
class RHPapersListBase(RHJudgingAreaBase):
"""Base class for assignment/judging paper lists"""
@cached_property
def list_generator(self):
if self.management:
return PaperAssignmentListGenerator(event=self.event)
else:
return PaperJudgingAreaListGeneratorDisplay(event=self.event, user=session.user)
class RHPapersList(RHPapersListBase):
"""Display the paper list for assignment/judging"""
@cached_property
def view_class(self):
return WPManagePapers if self.management else WPDisplayJudgingArea
def _process(self):
return self.view_class.render_template(self.template, self.event, **self.list_generator.get_list_kwargs())
@cached_property
def template(self):
return 'management/assignment.html' if self.management else 'display/judging_area.html'
class RHCustomizePapersList(RHPapersListBase):
"""Filter options and columns to display for the paper list"""
ALLOW_LOCKED = True
def _process_GET(self):
list_config = self.list_generator.list_config
return jsonify_template('events/papers/paper_list_filter.html',
event=self.event,
static_items=self.list_generator.static_items,
filters=list_config['filters'],
visible_items=list_config['items'])
def _process_POST(self):
self.list_generator.store_configuration()
return jsonify_data(flash=False, **self.list_generator.render_list())
class RHPapersActionBase(RHPapersListBase):
"""Base class for actions on selected papers"""
def _get_contrib_query_options(self):
return ()
def _process_args(self):
RHPapersListBase._process_args(self)
ids = map(int, request.form.getlist('contribution_id'))
self.contributions = (self.list_generator._build_query()
.filter(Contribution.id.in_(ids))
.options(*self._get_contrib_query_options())
.all())
class RHDownloadPapers(ZipGeneratorMixin, RHPapersActionBase):
"""Generate a ZIP file with paper files for a given list of contributions"""
ALLOW_LOCKED = True
def _prepare_folder_structure(self, item):
paper_title = secure_filename('{}_{}'.format(item.paper.contribution.friendly_id,
item.paper.contribution.title), 'paper')
file_name = secure_filename('{}_{}'.format(item.id, item.filename), 'paper')
return os.path.join(*self._adjust_path_length([paper_title, file_name]))
def _iter_items(self, contributions):
contributions_with_paper = [c for c in self.contributions if c.paper]
for contrib in contributions_with_paper:
for f in contrib.paper.last_revision.files:
yield f
def _process(self):
return self._generate_zip_file(self.contributions, name_prefix='paper-files', name_suffix=self.event.id)
class RHJudgePapers(RHPapersActionBase):
"""Bulk judgment of papers"""
def _process(self):
form = BulkPaperJudgmentForm(event=self.event, judgment=request.form.get('judgment'),
contribution_id=[c.id for c in self.contributions])
if form.validate_on_submit():
submitted_papers = [c.paper for c in self.contributions if
c.paper and c.paper.last_revision.state == PaperRevisionState.submitted]
for submitted_paper in submitted_papers:
judge_paper(submitted_paper, form.judgment.data, form.judgment_comment.data, judge=session.user)
num_submitted_papers = len(submitted_papers)
num_not_submitted_papers = len(self.contributions) - num_submitted_papers
if num_submitted_papers:
flash(ngettext("One paper has been judged.",
"{num} papers have been judged.",
num_submitted_papers).format(num=num_submitted_papers), 'success')
if num_not_submitted_papers:
flash(ngettext("One contribution has been skipped since it has no paper submitted yet or it is in "
"a final state.",
"{num} contributions have been skipped since they have no paper submitted yet or they "
"are in a final state.",
num_not_submitted_papers).format(num=num_not_submitted_papers), 'warning')
return jsonify_data(**self.list_generator.render_list())
return jsonify_form(form=form, submit=_('Judge'), disabled_until_change=False)
class RHAssignPapersBase(RHPapersActionBase):
"""Base class for assigning/unassigning paper reviewing roles"""
def _get_contrib_query_options(self):
return [selectinload('person_links')]
def _process_args(self):
RHPapersActionBase._process_args(self)
self.role = PaperReviewingRole[request.view_args['role']]
user_ids = map(int, request.form.getlist('user_id'))
self.users = {u for u in CFP_ROLE_MAP[self.role](self.event.cfp) if u.id in user_ids}
def _check_access(self):
RHPapersActionBase._check_access(self)
if not self.management and self.role == PaperReviewingRole.judge:
raise Forbidden
def _process_assignment(self, assign):
update_reviewing_roles(self.event, self.users, self.contributions, self.role, assign)
if assign:
flash(_("Paper reviewing roles have been assigned."), 'success')
else:
flash(_("Paper reviewing roles have been unassigned."), 'success')
return jsonify_data(**self.list_generator.render_list())
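# A conflict flags potential reviewer bias: for every selected user with an
# affiliation, list the contributions where that affiliation appears in the
# affiliation of any person linked to the contribution.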
def _get_conflicts(self, users):
conflicts = defaultdict(list)
for user in users:
if not user.affiliation:
continue
for contribution in self.contributions:
conflicts[user].extend(
(
contribution.title,
url_for('contributions.display_contribution', contribution),
)
for person in contribution.person_links
if user.affiliation in person.affiliation
)
return conflicts
def _render_form(self, users, action):
conflicts = self._get_conflicts(users)
user_competences = self.event.cfp.user_competences
competences = {'competences_{}'.format(user_id): competences.competences
for user_id, competences in user_competences.iteritems()}
return jsonify_template('events/papers/assign_role.html', event=self.event, role=self.role.name,
action=action, users=users, competences=competences,
contribs=self.contributions, conflicts=conflicts)
class RHAssignPapers(RHAssignPapersBase):
"""Render the user list to assign paper reviewing roles"""
def _process(self):
if self.users:
return self._process_assignment(True)
users = CFP_ROLE_MAP[self.role](self.event.cfp)
return self._render_form(users, 'assign')
class RHUnassignPapers(RHAssignPapersBase):
"""Render the user list to unassign paper reviewing roles"""
def _process(self):
if self.users:
return self._process_assignment(False)
_get_users = CONTRIB_ROLE_MAP[self.role]
users = set(chain.from_iterable(_get_users(c) for c in self.contributions))
return self._render_form(users, 'unassign')
|
sangeethah/validation-tests
|
refs/heads/master
|
tests/v2_validation/cattlevalidationtest/core/test_services.py
|
1
|
from common_fixtures import * # NOQA
from cattle import ApiError
from test_services_lb_balancer import create_environment_with_balancer_services
TEST_SERVICE_OPT_IMAGE = 'ibuildthecloud/helloworld'
TEST_SERVICE_OPT_IMAGE_LATEST = TEST_SERVICE_OPT_IMAGE + ':latest'
TEST_SERVICE_OPT_IMAGE_UUID = 'docker:' + TEST_SERVICE_OPT_IMAGE_LATEST
LB_IMAGE_UUID = "docker:sangeetha/testlbsd:latest"
SSH_IMAGE_UUID = "docker:sangeetha/testclient:latest"
docker_config_running = [{"docker_param_name": "State.Running",
"docker_param_value": "true"}]
docker_config_stopped = [{"docker_param_name": "State.Running",
"docker_param_value": "false"}]
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
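# Module-level accumulators: total_time collects the cumulative service
# activation time across tests (a one-element list so helpers can mutate it in
# place), and shared_env caches the stack/service created once per session by
# the autouse fixture below and reused by the reconciliation tests.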
total_time = [0]
shared_env = []
@pytest.fixture(scope='session', autouse=True)
def create_env_for_activate_deactivate(request, client):
service, env = create_env_and_svc_activate(client, 3, False)
shared_env.append({"service": service,
"env": env})
def fin():
to_delete = [env]
delete_all(client, to_delete)
request.addfinalizer(fin)
def deactivate_activate_service(client, service):
# Deactivate service
service = service.deactivate()
service = client.wait_success(service, 300)
assert service.state == "inactive"
# Activate Service
service = service.activate()
service = client.wait_success(service, 300)
assert service.state == "active"
return service
def create_env_and_svc_activate(client, scale, check=True,
retainIp=False):
launch_config = {"imageUuid": TEST_IMAGE_UUID}
service, env = create_env_and_svc_activate_launch_config(
client, launch_config, scale, check, retainIp)
return service, env
def create_env_and_svc_activate_launch_config(
client, launch_config, scale,
check=True, retainIp=False):
start_time = time.time()
service, env = create_env_and_svc(client, launch_config, scale, retainIp)
service = service.activate()
service = client.wait_success(service, 300)
assert service.state == "active"
if check:
check_container_in_service(client, service)
time_taken = time.time() - start_time
total_time[0] = total_time[0] + time_taken
logger.info("time taken - " + str(time_taken))
logger.info("total time taken - " + str(total_time[0]))
return service, env
def test_services_docker_options(client, socat_containers):
hosts = client.list_host(kind='docker', removed_null=True, state="active")
con_host = hosts[0]
vol_container = client.create_container(imageUuid=TEST_IMAGE_UUID,
name=random_str(),
requestedHostId=con_host.id
)
vol_container = client.wait_success(vol_container)
volume_in_host = "/test/container"
volume_in_container = "/test/vol1"
docker_vol_value = volume_in_host + ":" + volume_in_container + ":ro"
cap_add = ["CHOWN"]
cap_drop = ["KILL"]
restart_policy = {"maximumRetryCount": 10, "name": "on-failure"}
dns_search = ['1.2.3.4']
dns_name = ['1.2.3.4']
domain_name = "rancher.io"
host_name = "test"
user = "root"
command = ["sleep", "9000"]
env_var = {"TEST_FILE": "/etc/testpath.conf"}
memory = 8000000
cpu_set = "0"
cpu_shares = 400
launch_config = {"imageUuid": TEST_SERVICE_OPT_IMAGE_UUID,
"command": command,
"dataVolumes": [docker_vol_value],
"dataVolumesFrom": [vol_container.id],
"environment": env_var,
"capAdd": cap_add,
"capDrop": cap_drop,
"dnsSearch": dns_search,
"dns": dns_name,
"privileged": True,
"domainName": domain_name,
"stdinOpen": True,
"tty": True,
"memory": memory,
"cpuSet": cpu_set,
"cpuShares": cpu_shares,
"restartPolicy": restart_policy,
"directory": "/",
"hostname": host_name,
"user": user,
"requestedHostId": con_host.id
}
scale = 2
service, env = create_env_and_svc(client, launch_config,
scale)
env = env.activateservices()
service = client.wait_success(service, 300)
assert service.state == "active"
check_container_in_service(client, service)
container_list = get_service_container_list(client, service)
dns_name.append(RANCHER_DNS_SERVER)
dns_search.append(env.name+"."+RANCHER_DNS_SEARCH)
dns_search.append(service.name+"."+env.name+"."+RANCHER_DNS_SEARCH)
dns_search.append(RANCHER_DNS_SEARCH)
for c in container_list:
docker_client = get_docker_client(c.hosts[0])
inspect = docker_client.inspect_container(c.externalId)
assert docker_vol_value in inspect["HostConfig"]["Binds"]
assert inspect["HostConfig"]["VolumesFrom"] == \
[vol_container.externalId]
assert inspect["HostConfig"]["PublishAllPorts"] is False
assert inspect["HostConfig"]["Privileged"] is True
assert inspect["Config"]["OpenStdin"] is True
assert inspect["Config"]["Tty"] is True
assert inspect["HostConfig"]["Dns"] == dns_name
assert inspect["HostConfig"]["DnsSearch"] == dns_search
assert inspect["Config"]["Hostname"] == host_name
assert inspect["Config"]["Domainname"] == domain_name
assert inspect["Config"]["User"] == user
assert inspect["HostConfig"]["CapAdd"] == cap_add
assert inspect["HostConfig"]["CapDrop"] == cap_drop
assert inspect["HostConfig"]["CpusetCpus"] == cpu_set
# Restart policy is not supported for service containers, so Docker
# reports the default (empty) policy
assert inspect["HostConfig"]["RestartPolicy"]["Name"] == ""
assert \
inspect["HostConfig"]["RestartPolicy"]["MaximumRetryCount"] == 0
assert inspect["Config"]["Cmd"] == command
assert inspect["HostConfig"]["Memory"] == memory
assert "TEST_FILE=/etc/testpath.conf" in inspect["Config"]["Env"]
assert inspect["HostConfig"]["CpuShares"] == cpu_shares
delete_all(client, [env])
def test_services_docker_options_2(client, socat_containers):
hosts = client.list_host(kind='docker', removed_null=True, state="active")
cpu_shares = 400
ulimit = {"hard": 1024, "name": "cpu", "soft": 1024}
ulimit_inspect = {"Hard": 1024, "Name": "cpu", "Soft": 1024}
ipcMode = "host"
sysctls = {"net.ipv4.ip_forward": "1"}
dev_opts = {
'/dev/null': {
'readIops': 2000,
'writeIops': 3000,
'readBps': 4000,
'writeBps': 200,
}
}
cpu_shares = 400
blkio_weight = 1000
cpu_period = 10000
cpu_quota = 20000
cpu_set = "0"
cpu_setmems = "0"
dns_opt = ["abc"]
group_add = ["root"]
kernel_memory = 6000000
memory_reservation = 5000000
memory_swap = -1
memory_swappiness = 100
oom_killdisable = True
oom_scoreadj = 100
read_only = True
shm_size = 1024
stop_signal = "SIGTERM"
uts = "host"
dev_opts_inspect = {u"Path": "/dev/null",
u"Rate": 400}
cgroup_parent = "xyz"
extraHosts = ["host1:10.1.1.1", "host2:10.2.2.2"]
tmp_fs = {"/tmp": "rw"}
security_opt = ["label=user:USER", "label=role:ROLE"]
launch_config = {"imageUuid": TEST_SERVICE_OPT_IMAGE_UUID,
"extraHosts": extraHosts,
"privileged": True,
"cpuShares": cpu_shares,
"blkioWeight": blkio_weight,
"blkioDeviceOptions": dev_opts,
"cgroupParent": cgroup_parent,
"cpuShares": cpu_shares,
"cpuPeriod": cpu_period,
"cpuQuota": cpu_quota,
"cpuSet": cpu_set,
"cpuSetMems": cpu_setmems,
"dnsOpt": dns_opt,
"groupAdd": group_add,
"kernelMemory": kernel_memory,
"memoryReservation": memory_reservation,
"memorySwap": memory_swap,
"memorySwappiness": memory_swappiness,
"oomKillDisable": oom_killdisable,
"oomScoreAdj": oom_scoreadj,
"readOnly": read_only,
"securityOpt": security_opt,
"shmSize": shm_size,
"stopSignal": stop_signal,
"sysctls": sysctls,
"tmpfs": tmp_fs,
"ulimits": [ulimit],
"ipcMode": ipcMode,
"uts": uts,
"requestedHostId": hosts[0].id
}
scale = 2
service, env = create_env_and_svc(client, launch_config,
scale)
env = env.activateservices()
service = client.wait_success(service, 300)
assert service.state == "active"
check_container_in_service(client, service)
container_list = get_service_container_list(client, service)
for c in container_list:
docker_client = get_docker_client(c.hosts[0])
inspect = docker_client.inspect_container(c.externalId)
assert inspect["HostConfig"]["ExtraHosts"] == extraHosts
assert inspect["HostConfig"]["BlkioWeight"] == blkio_weight
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 4000
assert \
inspect["HostConfig"]["BlkioDeviceReadBps"] == [dev_opts_inspect]
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 200
assert \
inspect["HostConfig"]["BlkioDeviceWriteBps"] == [dev_opts_inspect]
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 2000
assert \
inspect["HostConfig"]["BlkioDeviceReadIOps"] == [dev_opts_inspect]
dev_opts_inspect["Path"] = "/dev/null"
dev_opts_inspect["Rate"] = 3000
assert \
inspect["HostConfig"]["BlkioDeviceWriteIOps"] == [dev_opts_inspect]
assert inspect["HostConfig"]["CpuShares"] == cpu_shares
assert inspect["HostConfig"]["CgroupParent"] == cgroup_parent
assert inspect["HostConfig"]["CpuPeriod"] == cpu_period
assert inspect["HostConfig"]["CpuQuota"] == cpu_quota
assert inspect["HostConfig"]["CpusetCpus"] == cpu_set
assert inspect["HostConfig"]["CpusetMems"] == cpu_setmems
assert inspect["HostConfig"]["KernelMemory"] == kernel_memory
assert inspect["HostConfig"]["MemoryReservation"] == memory_reservation
assert inspect["HostConfig"]["MemorySwap"] == memory_swap
assert inspect["HostConfig"]["MemorySwappiness"] == memory_swappiness
assert inspect["HostConfig"]["OomKillDisable"]
assert inspect["HostConfig"]["OomScoreAdj"] == oom_scoreadj
assert inspect["HostConfig"]["ReadonlyRootfs"]
assert inspect["HostConfig"]["SecurityOpt"] == security_opt
assert inspect["HostConfig"]["Tmpfs"] == tmp_fs
assert inspect["HostConfig"]["ShmSize"] == shm_size
assert inspect["Config"]["StopSignal"] == stop_signal
assert inspect["HostConfig"]["Ulimits"] == [ulimit_inspect]
assert inspect["HostConfig"]["IpcMode"] == ipcMode
assert inspect["HostConfig"]["UTSMode"] == uts
assert inspect["HostConfig"]["DnsOptions"] == dns_opt
assert inspect["HostConfig"]["GroupAdd"] == group_add
delete_all(client, [env])
def test_services_port_and_link_options(client,
socat_containers):
hosts = client.list_host(kind='docker', removed_null=True, state="active")
host = hosts[0]
link_host = hosts[1]
link_name = "WEB1"
link_port = 80
exposed_port = 9999
link_container = client.create_container(
imageUuid=LB_IMAGE_UUID,
environment={'CONTAINER_NAME': link_name},
name=random_str(),
requestedHostId=host.id
)
link_container = client.wait_success(link_container)
launch_config = {"imageUuid": SSH_IMAGE_UUID,
"ports": [str(exposed_port)+":22/tcp"],
"instanceLinks": {
link_name:
link_container.id},
"requestedHostId": link_host.id,
}
service, env = create_env_and_svc(client, launch_config, 1)
env = env.activateservices()
service = client.wait_success(service, 300)
container_name = get_container_name(env, service, 1)
containers = client.list_container(name=container_name, state="running")
assert len(containers) == 1
con = containers[0]
validate_exposed_port_and_container_link(client, con, link_name,
link_port, exposed_port)
delete_all(client, [env, link_container])
def test_services_multiple_expose_port(client):
public_port = range(2080, 2092)
private_port = range(80, 92)
port_mapping = []
for i in range(0, len(public_port)):
port_mapping.append(str(public_port[i])+":" +
str(private_port[i]) + "/tcp")
launch_config = {"imageUuid": MULTIPLE_EXPOSED_PORT_UUID,
"ports": port_mapping,
}
service, env = create_env_and_svc(client, launch_config, 3)
env = env.activateservices()
service = client.wait_success(service, 300)
validate_exposed_port(client, service, public_port)
delete_all(client, [env])
def test_services_random_expose_port(client):
launch_config = {"imageUuid": MULTIPLE_EXPOSED_PORT_UUID,
"ports": ["80/tcp", "81/tcp"]
}
service, env = create_env_and_svc(client, launch_config, 3)
env = env.activateservices()
service = client.wait_success(service, 300)
port = service.launchConfig["ports"][0]
exposedPort1 = int(port[0:port.index(":")])
assert exposedPort1 in range(49153, 65535)
port = service.launchConfig["ports"][1]
exposedPort2 = int(port[0:port.index(":")])
assert exposedPort2 in range(49153, 65535)
print service.publicEndpoints
validate_exposed_port(client, service, [exposedPort1, exposedPort2])
delete_all(client, [env])
def test_services_random_expose_port_exhaustrange(
admin_client, client):
# Set random port range to 6 ports and exhaust 5 of them by creating a
# service that has 5 random ports exposed
project = admin_client.list_project(name=PROJECT_NAME)[0]
project = admin_client.update(
project, servicesPortRange={"startPort": 65500, "endPort": 65505})
project = wait_success(client, project)
launch_config = {"imageUuid": MULTIPLE_EXPOSED_PORT_UUID,
"ports":
["80/tcp", "81/tcp", "82/tcp", "83/tcp", "84/tcp"]
}
service, env = create_env_and_svc(client, launch_config, 3)
env = env.activateservices()
service = client.wait_success(service, 60)
wait_for_condition(client,
service,
lambda x: len(x.publicEndpoints) == 15,
lambda x:
"publicEndpoints is " + str(x.publicEndpoints))
exposedPorts = []
for i in range(0, 5):
port = service.launchConfig["ports"][i]
exposedPort = int(port[0:port.index(":")])
exposedPorts.append(exposedPort)
assert exposedPort in range(65500, 65506)
validate_exposed_port(client, service, exposedPorts)
# Create a service that has 2 random exposed ports when there is only 1
# free port available in the random port range
# Validate that the service gets created with no ports exposed
launch_config = {"imageUuid": MULTIPLE_EXPOSED_PORT_UUID,
"ports":
["80/tcp", "81/tcp"]
}
random_name = random_str()
service_name = random_name.replace("-", "")
service1 = client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config,
scale=3,
startOnCreate=True)
time.sleep(5)
assert service1.state == "registering"
"""
service1, env1 = create_env_and_svc(client, launch_config, 3)
env1 = env1.activateservices()
service1 = client.wait_success(service1, 60)
print service.publicEndpoints
wait_for_condition(client,
service1,
lambda x: x.publicEndpoints is not None,
lambda x:
"publicEndpoints is " + str(x.publicEndpoints))
service1 = client.reload(service1)
print service.publicEndpoints
assert len(service1.publicEndpoints) == 0
"""
# Delete the service that consumed 5 random ports
delete_all(client, [service])
wait_for_condition(
client, service,
lambda x: x.state == "removed",
lambda x: 'State is: ' + x.state)
# Wait for service that is stuck in "registering" state to get to "active"
# state
wait_for_condition(
client, service1,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
120)
wait_for_condition(client,
service1,
lambda x: x.publicEndpoints is not None,
lambda x:
"publicEndpoints is " + str(x.publicEndpoints))
service1 = client.reload(service1)
assert service1.publicEndpoints is not None
assert len(service1.publicEndpoints) == 6
exposedPorts = []
for i in range(0, 2):
port = service1.launchConfig["ports"][i]
exposedPort = int(port[0:port.index(":")])
exposedPorts.append(exposedPort)
assert exposedPort in range(65500, 65506)
validate_exposed_port(client, service1, exposedPorts)
delete_all(client, [env])
def test_environment_activate_deactivate_delete(client,
socat_containers):
launch_config = {"imageUuid": TEST_IMAGE_UUID}
scale = 1
service1, env = create_env_and_svc(client, launch_config,
scale)
service2 = create_svc(client, env, launch_config, scale)
# Environment Activate Services
env = env.activateservices()
service1 = client.wait_success(service1, 300)
assert service1.state == "active"
check_container_in_service(client, service1)
service2 = client.wait_success(service2, 300)
assert service2.state == "active"
check_container_in_service(client, service2)
# Environment Deactivate Services
env = env.deactivateservices()
wait_until_instances_get_stopped(client, service1)
wait_until_instances_get_stopped(client, service2)
service1 = client.wait_success(service1, 300)
assert service1.state == "inactive"
check_stopped_container_in_service(client, service1)
service2 = client.wait_success(service2, 300)
assert service2.state == "inactive"
check_stopped_container_in_service(client, service2)
# Environment Activate Services
env = env.activateservices()
service1 = client.wait_success(service1, 300)
assert service1.state == "active"
check_container_in_service(client, service1)
service2 = client.wait_success(service2, 300)
assert service2.state == "active"
check_container_in_service(client, service2)
# Delete Environment
env = client.wait_success(client.delete(env))
assert env.state == "removed"
# Deleting the service causes its instances to be "removed".
# An instance continues to be part of the service until it is purged.
check_for_deleted_service(client, env, service1)
check_for_deleted_service(client, env, service2)
delete_all(client, [env])
def test_service_activate_deactivate_delete(client,
socat_containers):
launch_config = {"imageUuid": TEST_IMAGE_UUID}
scale = 2
service, env = create_env_and_svc(client, launch_config,
scale)
# Activate Services
service = service.activate()
service = client.wait_success(service, 300)
assert service.state == "active"
check_container_in_service(client, service)
# Deactivate Services
service = service.deactivate()
service = client.wait_success(service, 300)
assert service.state == "inactive"
wait_until_instances_get_stopped(client, service)
check_stopped_container_in_service(client, service)
# Activate Services
service = service.activate()
service = client.wait_success(service, 300)
assert service.state == "active"
check_container_in_service(client, service)
# Delete Service
service = client.wait_success(client.delete(service))
assert service.state == "removed"
check_for_deleted_service(client, env, service)
delete_all(client, [env])
def test_service_activate_stop_instance(
client, socat_containers):
service = shared_env[0]["service"]
check_for_service_reconciliation_on_stop(client, service)
def test_service_activate_delete_instance(
client, socat_containers):
service = shared_env[0]["service"]
check_for_service_reconciliation_on_delete(client, service)
def test_service_activate_purge_instance(
client, socat_containers):
service = shared_env[0]["service"]
# Purge 2 instances
containers = get_service_container_list(client, service)
container1 = containers[0]
container1 = client.wait_success(client.delete(container1))
container1 = client.wait_success(container1.purge())
container2 = containers[1]
container2 = client.wait_success(client.delete(container2))
container2 = client.wait_success(container2.purge())
wait_for_scale_to_adjust(client, service)
check_container_in_service(client, service)
@pytest.mark.skipif(
True, reason="Skip since there is no support for restore from v1.6.0")
def test_service_activate_restore_instance(
client, socat_containers):
service = shared_env[0]["service"]
# Restore 2 instances
containers = get_service_container_list(client, service)
container1 = containers[0]
container1 = client.wait_success(client.delete(container1))
container1 = client.wait_success(container1.restore())
container2 = containers[1]
container2 = client.wait_success(client.delete(container2))
container2 = client.wait_success(container2.restore())
assert container1.state == "stopped"
assert container2.state == "stopped"
wait_for_scale_to_adjust(client, service)
check_container_in_service(client, service)
delete_all(client, [container1, container2])
def test_service_scale_up(client, socat_containers):
check_service_scale(client, socat_containers, 2, 4)
def test_service_scale_down(client, socat_containers):
check_service_scale(client, socat_containers, 4, 2, 2)
def test_service_activate_stop_instance_scale_up(
client, socat_containers):
check_service_activate_stop_instance_scale(
client, socat_containers, 3, 4, [1])
def test_service_activate_delete_instance_scale_up(
client, socat_containers):
check_service_activate_delete_instance_scale(
client, socat_containers, 3, 4, [1])
def test_service_activate_stop_instance_scale_down(
client, socat_containers):
check_service_activate_stop_instance_scale(
client, socat_containers, 4, 1, [1], 3)
def test_service_activate_delete_instance_scale_down(
client, socat_containers):
check_service_activate_delete_instance_scale(
client, socat_containers, 4, 1, [1], 3)
def test_service_activate_stop_instance_scale_up_1(
client, socat_containers):
check_service_activate_stop_instance_scale(
client, socat_containers, 3, 4, [3])
def test_service_activate_delete_instance_scale_up_1(
client, socat_containers):
check_service_activate_delete_instance_scale(
client, socat_containers, 3, 4, [3])
def test_service_activate_stop_instance_scale_down_1(
client, socat_containers):
check_service_activate_stop_instance_scale(
client, socat_containers, 4, 1, [4], 3)
def test_service_activate_delete_instance_scale_down_1(
client, socat_containers):
check_service_activate_delete_instance_scale(client,
socat_containers,
4, 1, [4], 3)
def test_service_activate_stop_instance_scale_up_2(
client, socat_containers):
check_service_activate_stop_instance_scale(
client, socat_containers, 3, 4, [1, 2, 3])
def test_service_activate_delete_instance_scale_up_2(
client, socat_containers):
check_service_activate_delete_instance_scale(
client, socat_containers, 3, 4, [1, 2, 3])
def test_service_activate_stop_instance_scale_down_2(
client, socat_containers):
check_service_activate_stop_instance_scale(
client, socat_containers, 4, 1, [1, 2, 3, 4], 3)
def test_service_activate_delete_instance_scale_down_2(
client, socat_containers):
check_service_activate_delete_instance_scale(
client, socat_containers, 4, 1, [1, 2, 3, 4])
def test_service_activate_stop_instance_scale_up_3(
client, socat_containers):
check_service_activate_stop_instance_scale(
client, socat_containers, 3, 4, [2])
def test_service_activate_delete_instance_scale_up_3(
client, socat_containers):
check_service_activate_delete_instance_scale(
client, socat_containers, 3, 4, [2])
def test_service_activate_stop_instance_scale_down_3(
client, socat_containers):
check_service_activate_stop_instance_scale(
client, socat_containers, 4, 1, [2], 3)
def test_service_activate_delete_instance_scale_down_3(
client, socat_containers):
check_service_activate_delete_instance_scale(
client, socat_containers, 4, 1, [2], 3)
def test_services_hostname_override_1(client, socat_containers):
host_name = "test"
domain_name = "abc.com"
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"domainName": domain_name,
"hostname": host_name,
"labels":
{"io.rancher.container.hostname_override":
"container_name"}
}
scale = 2
service, env = create_env_and_svc(client, launch_config,
scale)
env = env.activateservices()
service = client.wait_success(service, 300)
assert service.state == "active"
check_container_in_service(client, service)
container_list = get_service_container_list(client, service)
assert len(container_list) == service.scale
print container_list
for c in container_list:
docker_client = get_docker_client(c.hosts[0])
inspect = docker_client.inspect_container(c.externalId)
assert inspect["Config"]["Hostname"] == c.name
delete_all(client, [env])
def test_services_hostname_override_2(client, socat_containers):
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"labels":
{"io.rancher.container.hostname_override":
"container_name"}
}
scale = 2
service, env = create_env_and_svc(client, launch_config,
scale)
env = env.activateservices()
service = client.wait_success(service, 300)
assert service.state == "active"
check_container_in_service(client, service)
container_list = get_service_container_list(client, service)
assert len(container_list) == service.scale
for c in container_list:
docker_client = get_docker_client(c.hosts[0])
inspect = docker_client.inspect_container(c.externalId)
assert inspect["Config"]["Hostname"] == c.name
delete_all(client, [env])
def test_service_reconcile_stop_instance_restart_policy_always(
client, socat_containers):
scale = 3
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"restartPolicy": {"name": "always"}}
service, env = create_env_and_svc_activate_launch_config(
client, launch_config, scale)
check_for_service_reconciliation_on_stop(client, service)
delete_all(client, [env])
def test_service_reconcile_delete_instance_restart_policy_always(
client, socat_containers):
scale = 3
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"restartPolicy": {"name": "always"}}
service, env = create_env_and_svc_activate_launch_config(
client, launch_config, scale)
check_for_service_reconciliation_on_delete(client, service)
delete_all(client, [env])
def test_service_reconcile_delete_instance_restart_policy_no(
client, socat_containers):
scale = 3
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"labels": {"io.rancher.container.start_once": True}
}
service, env = create_env_and_svc_activate_launch_config(
client, launch_config, scale)
check_for_service_reconciliation_on_delete(client, service)
delete_all(client, [env])
def test_service_reconcile_stop_instance_restart_policy_no(
client, socat_containers):
scale = 3
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"labels": {"io.rancher.container.start_once": True}}
service, env = create_env_and_svc_activate_launch_config(
client, launch_config, scale)
# Stop 2 containers of the service
assert service.scale > 1
containers = get_service_container_list(client, service)
assert len(containers) == service.scale
assert service.scale > 1
container1 = containers[0]
stop_container_from_host(client, container1)
container2 = containers[1]
stop_container_from_host(client, container2)
service = wait_state(client, service, "active")
time.sleep(30)
assert service.state == "active"
# Make sure that the containers remain in "stopped" state
container1 = client.reload(container1)
container2 = client.reload(container2)
assert container1.state == 'stopped'
assert container2.state == 'stopped'
delete_all(client, [env])
def test_service_reconcile_stop_instance_restart_policy_failure(
client, socat_containers):
scale = 3
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"restartPolicy": {"name": "on-failure"}
}
service, env = create_env_and_svc_activate_launch_config(
client, launch_config, scale)
check_for_service_reconciliation_on_stop(client, service)
delete_all(client, [env])
def test_service_reconcile_delete_instance_restart_policy_failure(
client, socat_containers):
scale = 3
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"restartPolicy": {"name": "on-failure"}
}
service, env = create_env_and_svc_activate_launch_config(
client, launch_config, scale)
check_for_service_reconciliation_on_delete(client, service)
delete_all(client, [env])
def test_service_reconcile_stop_instance_restart_policy_failure_count(
client, socat_containers):
scale = 3
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"restartPolicy": {"maximumRetryCount": 5,
"name": "on-failure"}
}
service, env = create_env_and_svc_activate_launch_config(
client, launch_config, scale)
check_for_service_reconciliation_on_stop(client, service)
delete_all(client, [env])
def test_service_reconcile_delete_instance_restart_policy_failure_count(
client, socat_containers):
scale = 3
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"restartPolicy": {"maximumRetryCount": 5,
"name": "on-failure"}
}
service, env = create_env_and_svc_activate_launch_config(
client, launch_config, scale)
check_for_service_reconciliation_on_delete(client, service)
delete_all(client, [env])
def test_service_with_healthcheck(client, socat_containers):
scale = 3
env, service = service_with_healthcheck_enabled(
client, scale)
delete_all(client, [env])
def test_service_with_healthcheck_none(client, socat_containers):
scale = 3
env, service = service_with_healthcheck_enabled(
client, scale, strategy="none")
delete_all(client, [env])
def test_service_with_healthcheck_recreate(
client, socat_containers):
scale = 10
env, service = service_with_healthcheck_enabled(
client, scale, strategy="recreate")
delete_all(client, [env])
def test_service_with_healthcheck_recreateOnQuorum(
client, socat_containers):
scale = 10
env, service = service_with_healthcheck_enabled(
client, scale, strategy="recreateOnQuorum", qcount=5)
delete_all(client, [env])
def test_service_with_healthcheck_container_unhealthy(
client, socat_containers):
scale = 2
port = 9998
env, service = service_with_healthcheck_enabled(client,
scale, port)
# Delete requestUrl from one of the containers to trigger health check
# failure and service reconcile
container_list = get_service_container_list(client, service)
con = container_list[1]
mark_container_unhealthy(client, con, port)
wait_for_condition(
client, con,
lambda x: x.healthState == 'unhealthy',
lambda x: 'State is: ' + x.healthState)
con = client.reload(con)
assert con.healthState == "unhealthy"
wait_for_condition(
client, con,
lambda x: x.state in ('removed', 'purged'),
lambda x: 'State is: ' + x.healthState)
wait_for_scale_to_adjust(client, service)
con = client.reload(con)
assert con.state in ('removed', 'purged')
container_list = get_service_container_list(client, service)
for con in container_list:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
delete_all(client, [env])
def test_service_with_healthcheck_container_unhealthy_retainip(
client, socat_containers):
scale = 2
port = 799
env, service = service_with_healthcheck_enabled(client,
scale, port,
retainIp=True)
# Delete requestUrl from one of the containers to trigger health check
# failure and service reconcile
container_list = get_service_container_list(client, service)
con = container_list[1]
con_name = con.name
external_id = con.externalId
ipAddress = con.primaryIpAddress
mark_container_unhealthy(client, con, port)
wait_for_condition(
client, con,
lambda x: x.healthState == 'unhealthy',
lambda x: 'State is: ' + x.healthState)
con = client.reload(con)
assert con.healthState == "unhealthy"
wait_for_condition(
client, con,
lambda x: x.state in ('removed', 'purged'),
lambda x: 'State is: ' + x.healthState)
wait_for_scale_to_adjust(client, service)
con = client.reload(con)
assert con.state in ('removed', 'purged')
container_list = get_service_container_list(client, service)
for con in container_list:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
# Make sure that the newly created container has the same IP as the
# unhealthy container
containers = client.list_container(name=con_name,
removed_null=True)
assert len(containers) == 1
container = containers[0]
assert container.state == 'running'
new_ipAddress = container.primaryIpAddress
new_externalId = container.externalId
assert ipAddress == new_ipAddress
assert external_id != new_externalId
delete_all(client, [env])
def test_service_with_healthcheck_none_container_unhealthy(
client, socat_containers):
scale = 3
port = 800
env, service = service_with_healthcheck_enabled(client,
scale, port,
strategy="none")
# Delete requestUrl from one of the containers to trigger health check
# failure and service reconcile
container_list = get_service_container_list(client, service)
con1 = container_list[1]
mark_container_unhealthy(client, con1, port)
# Validate that the container is marked unhealthy
wait_for_condition(
client, con1,
lambda x: x.healthState == 'unhealthy',
lambda x: 'State is: ' + x.healthState)
con1 = client.reload(con1)
assert con1.healthState == "unhealthy"
# Make sure that the container continues to be marked unhealthy
# and is in "Running" state
time.sleep(10)
con1 = client.reload(con1)
assert con1.healthState == "unhealthy"
assert con1.state == "running"
mark_container_healthy(client, container_list[1], port)
# Make sure that the container gets marked healthy
wait_for_condition(
client, con1,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
con1 = client.reload(con1)
assert con1.healthState == "healthy"
assert con1.state == "running"
delete_all(client, [env])
def test_service_with_healthcheck_none_container_unhealthy_delete(
client, socat_containers):
scale = 3
port = 801
env, service = service_with_healthcheck_enabled(client,
scale, port,
strategy="none")
# Delete requestUrl from containers to trigger health check
# failure and service reconcile
container_list = get_service_container_list(client, service)
unhealthy_containers = [container_list[0],
container_list[1]]
for con in unhealthy_containers:
mark_container_unhealthy(client, con, port)
# Validate that the container is marked unhealthy
for con in unhealthy_containers:
wait_for_condition(
client, con,
lambda x: x.healthState == 'unhealthy',
lambda x: 'State is: ' + x.healthState)
con = client.reload(con)
assert con.healthState == "unhealthy"
# Make sure that the containers continue to be marked unhealthy
# and are in "Running" state
time.sleep(10)
for con in unhealthy_containers:
con = client.reload(con)
assert con.healthState == "unhealthy"
assert con.state == "running"
# Delete 2 containers that are unhealthy
for con in unhealthy_containers:
container = client.wait_success(client.delete(con))
assert container.state == 'removed'
# Validate that the service reconciles on deletion of unhealthy containers
wait_for_scale_to_adjust(client, service)
check_container_in_service(client, service)
# Validate that all containers of the service get to "healthy" state
container_list = get_service_container_list(client, service)
for con in container_list:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
delete_all(client, [env])
def test_service_with_healthcheck_quorum_containers_unhealthy_1(
client, socat_containers):
scale = 2
port = 802
env, service = service_with_healthcheck_enabled(
client, scale, port, strategy="recreateOnQuorum",
qcount=1)
# Make 1 container unhealthy, so there is 1 container that is healthy
container_list = get_service_container_list(client, service)
con1 = container_list[1]
mark_container_unhealthy(client, con1, port)
# Validate that the container is marked unhealthy
wait_for_condition(
client, con1,
lambda x: x.healthState == 'unhealthy',
lambda x: 'State is: ' + x.healthState)
con1 = client.reload(con1)
assert con1.healthState == "unhealthy"
# Validate that the containers get removed
wait_for_condition(
client, con1,
lambda x: x.state in ('removed', 'purged'),
lambda x: 'State is: ' + x.healthState)
wait_for_scale_to_adjust(client, service)
con = client.reload(con1)
assert con.state in ('removed', 'purged')
# Validate that the service reconciles
container_list = get_service_container_list(client, service)
for con in container_list:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
delete_all(client, [env])
@pytest.mark.skipif(True, reason="Known issue - #5411")
def test_service_with_healthcheck_quorum_container_unhealthy_2(
client, socat_containers):
scale = 3
port = 803
env, service = service_with_healthcheck_enabled(
client, scale, port, strategy="recreateOnQuorum",
qcount=2)
# Make 2 containers unhealthy, so only 1 container is in healthy state
container_list = get_service_container_list(client, service)
unhealthy_containers = [container_list[1],
container_list[2]]
for con in unhealthy_containers:
mark_container_unhealthy(client, con, port)
# Validate that the container is marked unhealthy
for con in unhealthy_containers:
wait_for_condition(
client, con,
lambda x: x.healthState == 'unhealthy',
lambda x: 'State is: ' + x.healthState)
con = client.reload(con)
assert con.healthState == "unhealthy"
# Make sure that the containers continue to be marked unhealthy
# and are in "Running" state
time.sleep(10)
for con in unhealthy_containers:
con = client.reload(con)
assert con.healthState == "unhealthy"
assert con.state == "running"
delete_all(client, [env])
def test_dns_service_with_healthcheck_none_container_unhealthy(
client, socat_containers):
scale = 3
port = 804
cport = 805
# Create HealthCheck enabled Service
env, service = service_with_healthcheck_enabled(client,
scale, port,
strategy="none")
# Create Client Service for DNS access check
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID,
"ports": [str(cport)+":22/tcp"]}
random_name = random_str()
service_name = random_name.replace("-", "")
client_service = client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config_svc,
scale=1)
client_service = client.wait_success(client_service)
assert client_service.state == "inactive"
client_service = client.wait_success(client_service.activate())
assert client_service.state == "active"
# Check for DNS resolution
validate_linked_service(client, client_service, [service], cport)
# Delete requestUrl from one of the containers to trigger health check
# failure and service reconcile
container_list = get_service_container_list(client, service)
con1 = container_list[1]
mark_container_unhealthy(client, con1, port)
# Validate that the container is marked unhealthy
wait_for_condition(
client, con1,
lambda x: x.healthState == 'unhealthy',
lambda x: 'State is: ' + x.healthState)
con1 = client.reload(con1)
assert con1.healthState == "unhealthy"
# Check for DNS resolution
validate_linked_service(client, client_service, [service],
cport, exclude_instance=con1)
# Make sure that the container continues to be marked unhealthy
# and is in "Running" state
time.sleep(10)
con1 = client.reload(con1)
assert con1.healthState == "unhealthy"
assert con1.state == "running"
mark_container_healthy(client, container_list[1], port)
# Make sure that the container gets marked healthy
wait_for_condition(
client, con1,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
con1 = client.reload(con1)
assert con1.healthState == "healthy"
assert con1.state == "running"
# Check for DNS resolution
validate_linked_service(client, client_service, [service], cport)
delete_all(client, [env])
def test_service_health_check_scale_up(client, socat_containers):
scale = 1
final_scale = 3
env, service = service_with_healthcheck_enabled(
client, scale)
# Scale service
service = client.update(service, name=service.name, scale=final_scale)
service = client.wait_success(service, 300)
assert service.state == "active"
assert service.scale == final_scale
check_container_in_service(client, service)
check_for_healthstate(client, service)
delete_all(client, [env])
def test_service_health_check_reconcile_on_stop(
client, socat_containers):
scale = 3
env, service = service_with_healthcheck_enabled(
client, scale)
check_for_service_reconciliation_on_stop(client, service)
check_for_healthstate(client, service)
delete_all(client, [env])
def test_service_health_check_reconcile_on_delete(
client, socat_containers):
scale = 3
env, service = service_with_healthcheck_enabled(
client, scale)
check_for_service_reconciliation_on_delete(client, service)
check_for_healthstate(client, service)
delete_all(client, [env])
def test_service_health_check_with_tcp(
client, socat_containers):
scale = 3
env, service = service_with_healthcheck_enabled(
client, scale, protocol="tcp")
delete_all(client, [env])
def test_service_with_healthcheck_container_tcp_unhealthy(
client, socat_containers):
scale = 2
port = 9997
env, service = service_with_healthcheck_enabled(
client, scale, port, protocol="tcp")
# Stop ssh service from one of the containers to trigger health check
# failure and service reconcile
container_list = get_service_container_list(client, service)
con = container_list[1]
con_host = client.by_id('host', con.hosts[0].id)
hostIpAddress = con_host.ipAddresses()[0].address
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostIpAddress, username="root",
password="root", port=port)
cmd = "service ssh stop"
logger.info(cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
wait_for_condition(
client, con,
lambda x: x.healthState == 'unhealthy',
lambda x: 'State is: ' + x.healthState)
con = client.reload(con)
assert con.healthState == "unhealthy"
wait_for_condition(
client, con,
lambda x: x.state in ('removed', 'purged'),
lambda x: 'State is: ' + x.healthState)
wait_for_scale_to_adjust(client, service)
con = client.reload(con)
assert con.state in ('removed', 'purged')
container_list = get_service_container_list(client, service)
for con in container_list:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
delete_all(client, [env])
@pytest.mark.skipif(True,
reason='Service names not editable from 1.6 release')
def test_service_name_unique(client):
launch_config = {"imageUuid": TEST_IMAGE_UUID}
service, env = create_env_and_svc(client, launch_config, 1)
service_name = service.name
# Should not be allowed to create a service whose name is already
# used by another service in the same stack
with pytest.raises(ApiError) as e:
service = client.wait_success(client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config,
scale=1))
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == "name"
delete_all(client, [env])
def test_service_name_unique_create_after_delete(client):
launch_config = {"imageUuid": TEST_IMAGE_UUID}
service, env = create_env_and_svc(client, launch_config, 1)
service_name = service.name
# Should be allowed to reuse the name of a service that has already
# been deleted in the same stack
client.wait_success(client.delete(service))
service = client.wait_success(client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config,
scale=1))
assert service.state == "inactive"
delete_all(client, [env])
def test_service_name_unique_edit(client):
launch_config = {"imageUuid": TEST_IMAGE_UUID}
service, env = create_env_and_svc(client, launch_config, 1)
service_name = service.name
# Should not be allowed to edit an existing service and set its name
# to a service name that already exists in the same stack
service = client.wait_success(client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
scale=1))
with pytest.raises(ApiError) as e:
service = client.wait_success(client.update(service, name=service_name,
scale=1))
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == "name"
delete_all(client, [env])
def test_service_retain_ip(client):
launch_config = {"imageUuid": SSH_IMAGE_UUID}
service, env = create_env_and_svc(client, launch_config, 3, retainIp=True)
service = service.activate()
service = client.wait_success(service, 300)
assert service.state == "active"
container_name = get_container_name(env, service, "1")
containers = client.list_container(name=container_name,
removed_null=True)
assert len(containers) == 1
container = containers[0]
ipAddress = container.primaryIpAddress
externalId = container.externalId
container = client.wait_success(client.delete(container))
assert container.state == 'removed'
wait_for_scale_to_adjust(client, service)
container_name = get_container_name(env, service, 1)
containers = client.list_container(name=container_name,
removed_null=True)
assert len(containers) == 1
container = containers[0]
assert container.state == 'running'
new_ipAddress = container.primaryIpAddress
new_externalId = container.externalId
assert ipAddress == new_ipAddress
assert externalId != new_externalId
def test_services_rolling_strategy(client,
socat_containers):
launch_config = {"imageUuid": SSH_IMAGE_UUID,
}
service, env = create_env_and_svc(client, launch_config, 5)
env = env.activateservices()
service = client.wait_success(service, 300)
container_list = get_service_container_list(client, service)
assert len(container_list) == 5
for con in container_list:
assert con.state == "running"
assert con.startCount == 1
# Specify a rolling restart strategy with batch size 2 and an interval of 1000 ms
rollingrestartstrategy = {"batchSize": 2,
"intervalMillis": 1000
}
service = client.wait_success(service.restart(
rollingRestartStrategy=rollingrestartstrategy))
assert service.state == "active"
check_container_in_service(client, service)
container_list = get_service_container_list(client, service)
assert len(container_list) == 5
for con in container_list:
assert con.state == "running"
assert con.startCount == 2
env = client.reload(env)
assert env.healthState == "healthy"
delete_all(client, [env])
def test_service_reconcile_on_stop_exposed_port(client,
socat_containers):
port = "45"
launch_config = {"imageUuid": SSH_IMAGE_UUID,
"ports": [port+":22/tcp"]}
service, env = create_env_and_svc(client, launch_config, scale=3)
env = env.activateservices()
env = client.wait_success(env, SERVICE_WAIT_TIMEOUT)
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
check_for_service_reconciliation_on_stop(client, service)
delete_all(client, [env])
def test_service_reconcile_on_restart_exposed_port(client,
socat_containers):
port = "46"
launch_config = {"imageUuid": SSH_IMAGE_UUID,
"ports": [port+":22/tcp"]}
service, env = create_env_and_svc(client, launch_config, scale=3)
env = env.activateservices()
env = client.wait_success(env, SERVICE_WAIT_TIMEOUT)
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
check_for_service_reconciliation_on_restart(client, service)
delete_all(client, [env])
def test_service_reconcile_on_delete_exposed_port(client,
socat_containers):
port = "47"
launch_config = {"imageUuid": SSH_IMAGE_UUID,
"ports": [port+":22/tcp"]}
service, env = create_env_and_svc(client, launch_config, scale=3)
env = env.activateservices()
env = client.wait_success(env, SERVICE_WAIT_TIMEOUT)
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
check_for_service_reconciliation_on_delete(client, service)
delete_all(client, [env])
def test_insvc_upgrade_start_first(client, socat_containers):
service_scale = 1
lb_scale = 1
port = "7890"
# Create a target service for LB service
env, service, lb_service = \
create_environment_with_balancer_services(
client, service_scale, lb_scale, port)
validate_lb_service(client, lb_service, port, [service])
# Upgrade the target service to an invalid imageUuid so that the service
# gets stuck in the upgrading state
inServiceStrategy = {}
inServiceStrategy["launchConfig"] = {"imageUuid": WEB_IMAGE_UUID + "abc",
'labels': {'foo': "bar"}}
inServiceStrategy["batchSize"] = 3,
inServiceStrategy["intervalMillis"] = 100,
inServiceStrategy["startFirst"] = True
service = service.upgrade_action(inServiceStrategy=inServiceStrategy)
assert service.state == "upgrading"
# Assert that the service is stuck in "upgrading" state
# because of invalid image id
time.sleep(10)
assert service.state == "upgrading"
validate_lb_service(client, lb_service, port, [service])
@if_container_refactoring
def test_global_service(client):
min_scale = 2
max_scale = 4
increment = 2
env, service = create_global_service(client, min_scale, max_scale,
increment, host_label=None)
containers = get_service_container_list(client, service)
assert len(containers) == 2
delete_all(client, [env])
def check_service_scale(client, socat_containers,
initial_scale, final_scale,
removed_instance_count=0):
service, env = create_env_and_svc_activate(client,
initial_scale)
container_list = check_container_in_service(client, service)
# Scale service
service = client.update(service, name=service.name, scale=final_scale)
service = client.wait_success(service, 300)
assert service.state == "active"
assert service.scale == final_scale
updated_container_list = check_container_in_service(client, service)
removed_container_list = []
for con in container_list:
removed = True
for updated_con in updated_container_list:
if (con.id == updated_con.id):
removed = False
break
if removed:
removed_container_list.append(con)
# Check for destroyed containers in case of scale down
if final_scale < initial_scale:
check_container_removed_from_service(client, service,
removed_container_list)
delete_all(client, [env])
def check_service_activate_stop_instance_scale(client,
socat_containers,
initial_scale, final_scale,
stop_instance_index,
removed_instance_count=0):
service, env = create_env_and_svc_activate(client,
initial_scale)
container_list = check_container_in_service(client, service)
# Stop instance
for i in stop_instance_index:
container_name = get_container_name(env, service, str(i))
containers = client.list_container(name=container_name,
include="hosts")
assert len(containers) == 1
container = containers[0]
stop_container_from_host(client, container)
service = wait_state(client, service, "active")
logger.info("service being updated - " + service.name + " - " + service.id)
# Scale service
service = client.update(service, name=service.name, scale=final_scale)
service = client.wait_success(service, 300)
assert service.state == "active"
assert service.scale == final_scale
logger.info("Scaled service - " + str(final_scale))
updated_container_list = check_container_in_service(client, service)
removed_container_list = []
for con in container_list:
removed = True
for updated_con in updated_container_list:
if (con.id == updated_con.id):
removed = False
break
if removed:
removed_container_list.append(con)
# Check for destroyed containers in case of scale down
if final_scale < initial_scale and removed_instance_count > 0:
check_container_removed_from_service(client, service,
removed_container_list)
delete_all(client, [env])
def check_service_activate_delete_instance_scale(client,
socat_containers,
initial_scale, final_scale,
delete_instance_index,
removed_instance_count=0):
service, env = create_env_and_svc_activate(client,
initial_scale)
# Delete instance
for i in delete_instance_index:
container_name = get_container_name(env, service, str(i))
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
container = client.wait_success(client.delete(container))
assert container.state == 'removed'
logger.info("Delete Container -" + container_name)
service = wait_state(client, service, "active")
logger.info("service being updated " + service.name + " - " + service.id)
# Scale service
service = client.update(service, name=service.name, scale=final_scale)
service = client.wait_success(service, 300)
assert service.state == "active"
assert service.scale == final_scale
logger.info("Scaled service - " + str(final_scale))
check_container_in_service(client, service)
"""
# Check for destroyed containers in case of scale down
if final_scale < initial_scale and removed_instance_count > 0:
if removed_instance_count is not None:
check_container_removed_from_service(client, service,
removed_instance_count)
"""
delete_all(client, [env])
def _validate_add_service_link(service, client, scale):
service_maps = client. \
list_serviceExposeMap(serviceId=service.id)
assert len(service_maps) == scale
service_map = service_maps[0]
wait_for_condition(
client, service_map,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state)
def check_stopped_container_in_service(client, service):
container_list = get_service_container_list(client, service)
assert len(container_list) == service.scale
for container in container_list:
assert container.state == "stopped"
containers = client.list_container(
externalId=container.externalId,
include="hosts",
removed_null=True)
docker_client = get_docker_client(containers[0].hosts[0])
inspect = docker_client.inspect_container(container.externalId)
logger.info("Checked for container stopped - " + container.name)
assert inspect["State"]["Running"] is False
def check_container_removed_from_service(client, service,
removed_container_list):
instance_maps = client.list_serviceExposeMap(serviceId=service.id)
assert len(instance_maps) == service.scale
for container in removed_container_list:
wait_for_condition(
client, container,
lambda x: x.state == "removed" or x.state == "purged",
lambda x: 'State is: ' + x.state)
if container.state == "removed":
containers = client.list_container(name=container.name,
include="hosts")
assert len(containers) == 1
docker_client = get_docker_client(containers[0].hosts[0])
inspect = docker_client.inspect_container(container.externalId)
logger.info("Checked for containers removed from service - " +
container.name)
assert inspect["State"]["Running"] is False
def check_for_deleted_service(client, env, service):
service_maps = client.list_serviceExposeMap(serviceId=service.id)
for service_map in service_maps:
wait_for_condition(
client, service_map,
lambda x: x.state == "removed",
lambda x: 'State is: ' + x.state)
container = client.by_id('container', service_map.instanceId)
wait_for_condition(
client, container,
lambda x: x.state == "purged",
lambda x: 'State is: ' + x.state,
timeout=600)
logger.info("Checked for purged container - " + container.name)
def check_service_map(client, service, instance, state):
instance_service_map = client.\
list_serviceExposeMap(serviceId=service.id, instanceId=instance.id)
assert len(instance_service_map) == 1
assert instance_service_map[0].state == state
def check_for_service_reconciliation_on_stop(client, service,
stopFromRancher=False,
shouldRestart=True):
# Stop 2 containers of the service
assert service.scale > 1
containers = get_service_container_list(client, service)
assert len(containers) == service.scale
container1 = containers[0]
container2 = containers[1]
if not stopFromRancher:
stop_container_from_host(client, container1)
stop_container_from_host(client, container2)
else:
client.wait_success(container1.stop(), 120)
client.wait_success(container2.stop(), 120)
service = wait_state(client, service, "active")
wait_for_scale_to_adjust(client, service)
check_container_in_service(client, service)
container1 = client.reload(container1)
container2 = client.reload(container2)
if shouldRestart:
assert container1.state == 'running'
assert container2.state == 'running'
else:
assert container1.state == 'stopped'
assert container2.state == 'stopped'
def check_for_service_reconciliation_on_restart(client, service):
# Stop 2 containers of the service
assert service.scale > 1
containers = get_service_container_list(client, service)
assert len(containers) == service.scale
container1 = containers[0]
container1 = client.wait_success(container1.restart())
container2 = containers[1]
container2 = client.wait_success(container2.restart())
service = wait_state(client, service, "active")
wait_for_scale_to_adjust(client, service)
check_container_in_service(client, service)
container1 = client.reload(container1)
container2 = client.reload(container2)
assert container1.state == 'running'
assert container2.state == 'running'
def check_for_service_reconciliation_on_delete(client, service):
# Delete 2 containers of the service
containers = get_service_container_list(client, service)
container1 = containers[0]
container1 = client.wait_success(client.delete(container1))
container2 = containers[1]
container2 = client.wait_success(client.delete(container2))
assert container1.state == 'removed'
assert container2.state == 'removed'
wait_for_scale_to_adjust(client, service)
check_container_in_service(client, service)
def service_with_healthcheck_enabled(client, scale, port=None,
protocol="http", labels=None,
strategy=None, qcount=None,
retainIp=False):
health_check = {"name": "check1", "responseTimeout": 2000,
"interval": 2000, "healthyThreshold": 2,
"unhealthyThreshold": 3}
launch_config = {"imageUuid": HEALTH_CHECK_IMAGE_UUID,
"healthCheck": health_check
}
if protocol == "http":
health_check["requestLine"] = "GET /name.html HTTP/1.0"
health_check["port"] = 80
if protocol == "tcp":
health_check["requestLine"] = ""
health_check["port"] = 22
if strategy is not None:
health_check["strategy"] = strategy
if strategy == "recreateOnQuorum":
health_check['recreateOnQuorumStrategyConfig'] = {"quorum": qcount}
if port is not None:
launch_config["ports"] = [str(port)+":22/tcp"]
if labels is not None:
launch_config["labels"] = labels
service, env = create_env_and_svc_activate_launch_config(
client, launch_config, scale, retainIp=retainIp)
container_list = get_service_container_list(client, service)
assert \
len(container_list) == get_service_instance_count(client, service)
for con in container_list:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
return env, service
def check_for_healthstate(client, service):
container_list = get_service_container_list(client, service)
for con in container_list:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
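# Hedged illustration (not part of the original helper module): a minimal
# sketch of how the helpers above might be combined in a test. The `client`
# and `socat_containers` names are assumed to be the pytest fixtures used by
# the rest of this suite.
def _example_scale_and_healthcheck(client, socat_containers):
    # Scale a plain service from 2 to 4 containers and validate the containers.
    check_service_scale(client, socat_containers, 2, 4)
    # Launch a health-checked service and confirm every container reports healthy.
    env, service = service_with_healthcheck_enabled(client, scale=2)
    check_for_healthstate(client, service)
    delete_all(client, [env])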
|
rhosqeauto/InfraRed
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
nicolewhite/py2neo
|
refs/heads/release/2.0.8
|
test/core/__init__.py
|
12133432
| |
maxsocl/django
|
refs/heads/master
|
tests/db_functions/__init__.py
|
12133432
| |
disqus/django-old
|
refs/heads/master
|
django/conf/locale/sq/__init__.py
|
12133432
| |
opennode/waldur-mastermind
|
refs/heads/develop
|
src/waldur_mastermind/support/tests/unittests/__init__.py
|
12133432
| |
levigross/pyscanner
|
refs/heads/master
|
mytests/django/contrib/humanize/templatetags/__init__.py
|
12133432
| |
tod31/pyload
|
refs/heads/stable
|
module/plugins/crypter/MegauploadComFolder.py
|
5
|
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadCrypter import DeadCrypter
class MegauploadComFolder(DeadCrypter):
__name__ = "MegauploadComFolder"
__type__ = "crypter"
__version__ = "0.07"
__status__ = "stable"
__pattern__ = r'http://(?:www\.)?megaupload\.com/(\?f|xml/folderfiles\.php\?.*&?folderid)=\w+'
__config__ = [("activated", "bool", "Activated", True)]
__description__ = """Megaupload.com folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
|
utecuy/edx-platform
|
refs/heads/master
|
cms/envs/yaml_config.py
|
49
|
"""
This is the default template for our main set of AWS servers.
Before importing this settings file the following MUST be
defined in the environment:
* SERVICE_VARIANT - can be either "lms" or "cms"
* CONFIG_ROOT - the directory where the application
yaml config files are located
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import, undefined-variable, used-before-assignment
import yaml
from .common import *
from openedx.core.lib.logsettings import get_logger_config
from util.config_parse import convert_tokens
import os
from path import path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
# https://stackoverflow.com/questions/2890146/how-to-force-pyyaml-to-load-strings-as-unicode-objects
from yaml import Loader, SafeLoader
def construct_yaml_str(self, node):
"""
Override the default string handling function
to always return unicode objects
"""
return self.construct_scalar(node)
Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
# SERVICE_VARIANT specifies name of the variant used, which decides what YAML
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the YAML configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the YAML configuration files,
# based on the service variant. If no variant is used, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
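# Illustrative note (not from the original file): with SERVICE_VARIANT="cms"
# the code below reads "cms.env.yaml" and "cms.auth.yaml" from CONFIG_ROOT;
# with no variant set it reads plain "env.yaml" and "auth.yaml".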
##############################################################
#
# DEFAULT SETTINGS FOR PRODUCTION
#
# These are defaults common for all production deployments
#
DEBUG = False
TEMPLATE_DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
##############################################################
#
# DEFAULT SETTINGS FOR CELERY
#
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Skip djcelery migrations, since we don't use the database as the broker
SOUTH_MIGRATION_MODULES = {
'djcelery': 'ignore',
}
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
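# Illustrative note (not from the original file): with SERVICE_VARIANT="lms",
# QUEUE_VARIANT is "lms." and the default queue name above expands to
# "edx.lms.core.default".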
CELERY_ALWAYS_EAGER = False
GIT_REPO_EXPORT_DIR = '/edx/var/edxapp/export_course_repos'
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = None
EMAIL_FILE_PATH = None
STATIC_URL_BASE = None
STATIC_ROOT_BASE = None
SESSION_COOKIE_NAME = None
ADDL_INSTALLED_APPS = []
AUTH_USE_CAS = False
CAS_ATTRIBUTE_CALLBACK = None
MICROSITE_ROOT_DIR = ''
SEGMENT_IO = False
DATADOG = {}
LOCAL_LOGLEVEL = 'INFO'
##############################################################
#
# ENV TOKEN IMPORT
#
# Currently non-secure and secure settings are managed
# in two yaml files. This section imports the non-secure
# settings and modifies them in code if necessary.
#
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.yaml") as env_file:
ENV_TOKENS = yaml.load(env_file)
ENV_TOKENS = convert_tokens(ENV_TOKENS)
##########################################
# Merge settings from common.py
#
# Before the tokens are imported directly
# into settings some dictionary settings
# need to be merged from common.py
ENV_FEATURES = ENV_TOKENS.get('FEATURES', ENV_TOKENS.get('MITX_FEATURES', {}))
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
# Delete keys from ENV_TOKENS so that when it's imported
# into settings it doesn't override what was set above
if 'FEATURES' in ENV_TOKENS:
del ENV_TOKENS['FEATURES']
vars().update(ENV_TOKENS)
##########################################
# Manipulate imported settings with code
#
# For historical reasons some settings need
# to be modified in code. For example
# conversions to other data structures that
# cannot be represented in YAML.
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
STATIC_URL += EDX_PLATFORM_REVISION + "/"
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE) / EDX_PLATFORM_REVISION
# Cache used for location mapping -- called many times with the same key/value
# in a given request.
if 'loc_cache' not in CACHES:
CACHES['loc_cache'] = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
}
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if SESSION_COOKIE_NAME:
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(SESSION_COOKIE_NAME)
# Additional installed apps
for app in ADDL_INSTALLED_APPS:
INSTALLED_APPS += (app,)
LOGGING = get_logger_config(LOG_DIR,
local_loglevel=LOCAL_LOGLEVEL,
logging_env=LOGGING_ENV,
debug=False,
service_variant=SERVICE_VARIANT)
if AUTH_USE_CAS:
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK['module']),
CAS_ATTRIBUTE_CALLBACK['function']
)
MICROSITE_ROOT_DIR = path(MICROSITE_ROOT_DIR)
##############################################################
#
# AUTH TOKEN IMPORT
#
with open(CONFIG_ROOT / CONFIG_PREFIX + "auth.yaml") as auth_file:
AUTH_TOKENS = yaml.load(auth_file)
AUTH_TOKENS = convert_tokens(AUTH_TOKENS)
vars().update(AUTH_TOKENS)
##########################################
# Manipulate imported settings with code
#
if SEGMENT_IO_KEY:
FEATURES['SEGMENT_IO'] = SEGMENT_IO
if AWS_ACCESS_KEY_ID == "":
AWS_ACCESS_KEY_ID = None
if AWS_SECRET_ACCESS_KEY == "":
AWS_SECRET_ACCESS_KEY = None
MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE)
# TODO: deprecated (compatibility with previous settings)
if 'DATADOG_API' in AUTH_TOKENS:
DATADOG['api_key'] = AUTH_TOKENS['DATADOG_API']
BROKER_URL = "{0}://{1}:{2}@{3}/{4}".format(CELERY_BROKER_TRANSPORT,
CELERY_BROKER_USER,
CELERY_BROKER_PASSWORD,
CELERY_BROKER_HOSTNAME,
CELERY_BROKER_VHOST)
|
Omegaphora/external_chromium_org_tools_gyp
|
refs/heads/lp5.1
|
test/msvs/multiple_actions_error_handling/action_succeed.py
|
124
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
sys.exit(0)
|
iamshawnrice/htmlboilerplate
|
refs/heads/master
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSProject.py
|
2736
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
A new xml.dom.Element for the tool.
"""
return ['Tool', self._attrs]
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
      platforms: Array of string, the supported platforms. If null, defaults to ['Win32'].
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Returns:
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
|
bliti/django-nonrel-1.5
|
refs/heads/nonrel-1.5
|
django/core/management/sql.py
|
104
|
from __future__ import unicode_literals
import codecs
import os
import re
from django.conf import settings
from django.core.management.base import CommandError
from django.db import models
from django.db.models import get_models
from django.utils._os import upath
def sql_create(app, style, connection):
"Returns a list of the CREATE TABLE SQL statements for the given app."
if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy':
# This must be the "dummy" database backend, which means the user
# hasn't set ENGINE for the database.
raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
"because you haven't properly specified the ENGINE setting for the database.\n" +
"see: https://docs.djangoproject.com/en/dev/ref/settings/#databases")
# Get installed models, so we generate REFERENCES right.
# We trim models from the current app so that the sqlreset command does not
# generate invalid SQL (leaving models out of known_models is harmless, so
# we can be conservative).
app_models = models.get_models(app, include_auto_created=True)
final_output = []
tables = connection.introspection.table_names()
known_models = set([model for model in connection.introspection.installed_models(tables) if model not in app_models])
pending_references = {}
for model in app_models:
output, references = connection.creation.sql_create_model(model, style, known_models)
final_output.extend(output)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in known_models:
final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references))
final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references))
# Keep track of the fact that we've created the table for this model.
known_models.add(model)
# Handle references to tables that are from other apps
# but don't exist physically.
not_installed_models = set(pending_references.keys())
if not_installed_models:
alter_sql = []
for model in not_installed_models:
alter_sql.extend(['-- ' + sql for sql in
connection.creation.sql_for_pending_references(model, style, pending_references)])
if alter_sql:
final_output.append('-- The following references should be added but depend on non-existent tables:')
final_output.extend(alter_sql)
return final_output
def sql_delete(app, style, connection):
"Returns a list of the DROP TABLE SQL statements for the given app."
# This should work even if a connection isn't available
try:
cursor = connection.cursor()
except:
cursor = None
# Figure out which tables already exist
if cursor:
table_names = connection.introspection.table_names(cursor)
else:
table_names = []
output = []
# Output DROP TABLE statements for standard application tables.
to_delete = set()
references_to_delete = {}
app_models = models.get_models(app, include_auto_created=True)
for model in app_models:
if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names:
# The table exists, so it needs to be dropped
opts = model._meta
for f in opts.local_fields:
if f.rel and f.rel.to not in to_delete:
references_to_delete.setdefault(f.rel.to, []).append((model, f))
to_delete.add(model)
for model in app_models:
if connection.introspection.table_name_converter(model._meta.db_table) in table_names:
output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style))
# Close database connection explicitly, in case this output is being piped
# directly into a database client, to avoid locking issues.
if cursor:
cursor.close()
connection.close()
return output[::-1] # Reverse it, to deal with table dependencies.
def sql_flush(style, connection, only_django=False, reset_sequences=True):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True)
else:
tables = connection.introspection.table_names()
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs)
return statements
def sql_custom(app, style, connection):
"Returns a list of the custom table modifying SQL statements for the given app."
output = []
app_models = get_models(app)
for model in app_models:
output.extend(custom_sql_for_model(model, style, connection))
return output
def sql_indexes(app, style, connection):
"Returns a list of the CREATE INDEX SQL statements for all models in the given app."
output = []
for model in models.get_models(app):
output.extend(connection.creation.sql_indexes_for_model(model, style))
return output
def sql_all(app, style, connection):
"Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module."
return sql_create(app, style, connection) + sql_custom(app, style, connection) + sql_indexes(app, style, connection)
def _split_statements(content):
comment_re = re.compile(r"^((?:'[^']*'|[^'])*?)--.*$")
statements = []
statement = []
for line in content.split("\n"):
cleaned_line = comment_re.sub(r"\1", line).strip()
if not cleaned_line:
continue
statement.append(cleaned_line)
if cleaned_line.endswith(";"):
statements.append(" ".join(statement))
statement = []
return statements
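# Illustrative note (not part of the original Django source): given the content
#   "-- schema comment\nCREATE TABLE t (id int); -- trailing\nINSERT INTO t VALUES (1);"
# _split_statements() strips the comments and returns
#   ['CREATE TABLE t (id int);', 'INSERT INTO t VALUES (1);'].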
def custom_sql_for_model(model, style, connection):
opts = model._meta
app_dir = os.path.normpath(os.path.join(os.path.dirname(upath(models.get_app(model._meta.app_label).__file__)), 'sql'))
output = []
# Post-creation SQL should come before any initial SQL data is loaded.
# However, this should not be done for models that are unmanaged or
# for fields that are part of a parent model (via model inheritance).
if opts.managed:
post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')]
for f in post_sql_fields:
output.extend(f.post_create_sql(style, model._meta.db_table))
# Find custom SQL, if it's available.
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = [os.path.join(app_dir, "%s.%s.sql" % (opts.object_name.lower(), backend_name)),
os.path.join(app_dir, "%s.sql" % opts.object_name.lower())]
for sql_file in sql_files:
if os.path.exists(sql_file):
with codecs.open(sql_file, 'U', encoding=settings.FILE_CHARSET) as fp:
# Some backends can't execute more than one SQL statement at a time,
# so split into separate statements.
output.extend(_split_statements(fp.read()))
return output
def emit_post_sync_signal(created_models, verbosity, interactive, db):
# Emit the post_sync signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
print("Running post-sync handlers for application %s" % app_name)
models.signals.post_syncdb.send(sender=app, app=app,
created_models=created_models, verbosity=verbosity,
interactive=interactive, db=db)
|
hale36/SRTV
|
refs/heads/master
|
lib/subliminal/__init__.py
|
13
|
# -*- coding: utf-8 -*-
__title__ = 'subliminal'
__version__ = '1.1.0.dev0'
__author__ = 'Antoine Bertin'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015, Antoine Bertin'
import logging
from .api import (ProviderPool, check_video, provider_manager, download_best_subtitles, download_subtitles,
list_subtitles, save_subtitles)
from .cache import region
from .exceptions import Error, ProviderError
from .providers import Provider
from .subtitle import Subtitle, compute_score
from .video import SUBTITLE_EXTENSIONS, VIDEO_EXTENSIONS, Episode, Movie, Video, scan_video, scan_videos
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
SamiHiltunen/invenio-accounts
|
refs/heads/master
|
tests/conftest.py
|
1
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Pytest configuration."""
from __future__ import absolute_import, print_function
import os
import shutil
import tempfile
import pytest
from flask import Flask
from flask_babelex import Babel
from flask_celeryext import FlaskCeleryExt
from flask_cli import FlaskCLI, ScriptInfo
from flask_mail import Mail
from flask_menu import Menu
from invenio_db import InvenioDB, db
from simplekv.memory.redisstore import RedisStore
from sqlalchemy_utils.functions import create_database, database_exists, \
drop_database
from invenio_accounts import InvenioAccounts
def _app_factory(config=None):
"""Application factory."""
instance_path = tempfile.mkdtemp()
app = Flask('testapp', instance_path=instance_path)
app.config.update(
ACCOUNTS_USE_CELERY=False,
CELERY_ALWAYS_EAGER=True,
CELERY_CACHE_BACKEND="memory",
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_RESULT_BACKEND="cache",
LOGIN_DISABLED=False,
MAIL_SUPPRESS_SEND=True,
SECRET_KEY="CHANGE_ME",
SECURITY_PASSWORD_SALT="CHANGE_ME_ALSO",
SQLALCHEMY_DATABASE_URI=os.environ.get(
'SQLALCHEMY_DATABASE_URI', 'sqlite:///test.db'),
SERVER_NAME='example.com',
TESTING=True,
WTF_CSRF_ENABLED=False,
)
app.config.update(config or {})
FlaskCLI(app)
Menu(app)
Babel(app)
Mail(app)
InvenioDB(app)
return app
def _database_setup(app, request):
"""Setup database."""
with app.app_context():
if not database_exists(str(db.engine.url)):
create_database(str(db.engine.url))
db.create_all()
def teardown():
with app.app_context():
drop_database(str(db.engine.url))
# Delete sessions in kvsession store
if hasattr(app, 'kvsession_store') and \
isinstance(app.kvsession_store, RedisStore):
app.kvsession_store.redis.flushall()
shutil.rmtree(app.instance_path)
request.addfinalizer(teardown)
return app
@pytest.fixture
def base_app(request):
"""Flask application fixture."""
app = _app_factory()
_database_setup(app, request)
return app
@pytest.fixture
def app(request):
"""Flask application fixture with Invenio Accounts."""
app = _app_factory()
InvenioAccounts(app)
from invenio_accounts.views import blueprint
app.register_blueprint(blueprint)
_database_setup(app, request)
return app
@pytest.fixture
def script_info(app):
"""Get ScriptInfo object for testing CLI."""
return ScriptInfo(create_app=lambda info: app)
@pytest.fixture
def task_app(request):
"""Flask application with Celery enabled."""
app = _app_factory(dict(
ACCOUNTS_USE_CELERY=True,
MAIL_SUPPRESS_SEND=True,
))
FlaskCeleryExt(app)
InvenioAccounts(app)
_database_setup(app, request)
return app
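# Hedged illustration (not part of the original conftest): a test module could
# request the fixtures above directly, e.g.
#
#     def test_app_is_in_testing_mode(app):
#         assert app.config['TESTING'] is True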
|
fgesora/odoo
|
refs/heads/8.0
|
addons/website_blog/tests/common.py
|
279
|
# -*- coding: utf-8 -*-
from openerp.tests import common
class TestWebsiteBlogCommon(common.TransactionCase):
def setUp(self):
super(TestWebsiteBlogCommon, self).setUp()
Users = self.env['res.users']
group_blog_manager_id = self.ref('base.group_document_user')
group_employee_id = self.ref('base.group_user')
group_public_id = self.ref('base.group_public')
self.user_employee = Users.with_context({'no_reset_password': True}).create({
'name': 'Armande Employee',
'login': 'armande',
'alias_name': 'armande',
'email': 'armande.employee@example.com',
'notify_email': 'none',
'groups_id': [(6, 0, [group_employee_id])]
})
self.user_blogmanager = Users.with_context({'no_reset_password': True}).create({
'name': 'Bastien BlogManager',
'login': 'bastien',
'alias_name': 'bastien',
'email': 'bastien.blogmanager@example.com',
'notify_email': 'none',
'groups_id': [(6, 0, [group_blog_manager_id, group_employee_id])]
})
self.user_public = Users.with_context({'no_reset_password': True}).create({
'name': 'Cedric Public',
'login': 'cedric',
'alias_name': 'cedric',
'email': 'cedric.public@example.com',
'notify_email': 'none',
'groups_id': [(6, 0, [group_public_id])]
})
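# Hedged illustration (not part of the original module): concrete test cases
# would typically subclass the common class above and reuse the users created
# in setUp, e.g.
#
#     class TestBlogAccess(TestWebsiteBlogCommon):
#         def test_employee_user_created(self):
#             self.assertTrue(self.user_employee.exists())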
|
pvlib/pvlib-python
|
refs/heads/master
|
pvlib/snow.py
|
3
|
"""
The ``snow`` module contains functions that model module snow cover and the
associated effects on PV module output
"""
import numpy as np
import pandas as pd
from pvlib.tools import sind
def _time_delta_in_hours(times):
delta = times.to_series().diff()
return delta.dt.total_seconds().div(3600)
def fully_covered_nrel(snowfall, threshold_snowfall=1.):
'''
Calculates the timesteps when the row's slant height is fully covered
by snow.
Parameters
----------
snowfall : Series
Accumulated snowfall in each time period [cm]
threshold_snowfall : float, default 1.0
Hourly snowfall above which snow coverage is set to the row's slant
height. [cm/hr]
Returns
----------
boolean: Series
True where the snowfall exceeds the defined threshold to fully cover
the panel.
Notes
-----
Implements the model described in [1]_ with minor improvements in [2]_.
References
----------
.. [1] Marion, B.; Schaefer, R.; Caine, H.; Sanchez, G. (2013).
"Measured and modeled photovoltaic system energy losses from snow for
Colorado and Wisconsin locations." Solar Energy 97; pp.112-121.
.. [2] Ryberg, D; Freeman, J. "Integration, Validation, and Application
of a PV Snow Coverage Model in SAM" (2017) NREL Technical Report
NREL/TP-6A20-68705
'''
timestep = _time_delta_in_hours(snowfall.index)
hourly_snow_rate = snowfall / timestep
# if we can infer a time frequency, use first snowfall value
# otherwise the first snowfall value is ignored
freq = pd.infer_freq(snowfall.index)
if freq is not None:
timedelta = pd.tseries.frequencies.to_offset(freq) / pd.Timedelta('1h')
hourly_snow_rate.iloc[0] = snowfall[0] / timedelta
else: # can't infer frequency from index
hourly_snow_rate[0] = 0 # replaces NaN
return hourly_snow_rate > threshold_snowfall
def coverage_nrel(snowfall, poa_irradiance, temp_air, surface_tilt,
initial_coverage=0, threshold_snowfall=1.,
can_slide_coefficient=-80., slide_amount_coefficient=0.197):
'''
Calculates the fraction of the slant height of a row of modules covered by
snow at every time step.
Implements the model described in [1]_ with minor improvements in [2]_,
with the change that the output is in fraction of the row's slant height
rather than in tenths of the row slant height. As described in [1]_, model
validation focused on fixed tilt systems.
Parameters
----------
snowfall : Series
Accumulated snowfall within each time period. [cm]
poa_irradiance : Series
Total in-plane irradiance [W/m^2]
temp_air : Series
Ambient air temperature [C]
surface_tilt : numeric
        Tilt of modules from horizontal, e.g. surface facing up = 0,
surface facing horizon = 90. [degrees]
initial_coverage : float, default 0
Fraction of row's slant height that is covered with snow at the
beginning of the simulation. [unitless]
threshold_snowfall : float, default 1.0
Hourly snowfall above which snow coverage is set to the row's slant
height. [cm/hr]
can_slide_coefficient : float, default -80.
Coefficient to determine if snow can slide given irradiance and air
temperature. [W/(m^2 C)]
slide_amount_coefficient : float, default 0.197
Coefficient to determine fraction of snow that slides off in one hour.
[unitless]
Returns
-------
snow_coverage : Series
The fraction of the slant height of a row of modules that is covered
by snow at each time step.
Notes
-----
In [1]_, `can_slide_coefficient` is termed `m`, and the value of
`slide_amount_coefficient` is given in tenths of a module's slant height.
References
----------
.. [1] Marion, B.; Schaefer, R.; Caine, H.; Sanchez, G. (2013).
"Measured and modeled photovoltaic system energy losses from snow for
Colorado and Wisconsin locations." Solar Energy 97; pp.112-121.
.. [2] Ryberg, D; Freeman, J. (2017). "Integration, Validation, and
Application of a PV Snow Coverage Model in SAM" NREL Technical Report
NREL/TP-6A20-68705
'''
# find times with new snowfall
new_snowfall = fully_covered_nrel(snowfall, threshold_snowfall)
# set up output Series
snow_coverage = pd.Series(np.nan, index=poa_irradiance.index)
# determine amount that snow can slide in each timestep
can_slide = temp_air > poa_irradiance / can_slide_coefficient
slide_amt = slide_amount_coefficient * sind(surface_tilt) * \
_time_delta_in_hours(poa_irradiance.index)
slide_amt[~can_slide] = 0.
# don't slide during snow events
slide_amt[new_snowfall] = 0.
# don't slide in the interval preceding the snowfall data
slide_amt.iloc[0] = 0
# build time series of cumulative slide amounts
sliding_period_ID = new_snowfall.cumsum()
cumulative_sliding = slide_amt.groupby(sliding_period_ID).cumsum()
# set up time series of snow coverage without any sliding applied
snow_coverage[new_snowfall] = 1.0
if np.isnan(snow_coverage.iloc[0]):
snow_coverage.iloc[0] = initial_coverage
snow_coverage.ffill(inplace=True)
snow_coverage -= cumulative_sliding
# clean up periods where row is completely uncovered
return snow_coverage.clip(lower=0)
def dc_loss_nrel(snow_coverage, num_strings):
'''
Calculates the fraction of DC capacity lost due to snow coverage.
DC capacity loss assumes that if a string is partially covered by snow,
the string's capacity is lost; see [1]_, Eq. 11.8.
Module orientation is accounted for by specifying the number of cell
strings in parallel along the slant height.
For example, a typical 60-cell module has 3 parallel strings, each
comprising 20 cells in series, with the cells arranged in 6 columns of 10
cells each. For a row consisting of single modules, if the module is
mounted in portrait orientation, i.e., the row slant height is along a
column of 10 cells, there is 1 string in parallel along the row slant
height, so `num_strings=1`. In contrast, if the module is mounted in
landscape orientation with the row slant height comprising 6 cells, there
are 3 parallel strings along the row slant height, so `num_strings=3`.
Parameters
----------
snow_coverage : numeric
The fraction of row slant height covered by snow at each time step.
num_strings: int
The number of parallel-connected strings along a row slant height.
Returns
-------
loss : numeric
fraction of DC capacity loss due to snow coverage at each time step.
References
----------
.. [1] Gilman, P. et al., (2018). "SAM Photovoltaic Model Technical
Reference Update", NREL Technical Report NREL/TP-6A20-67399.
Available at https://www.nrel.gov/docs/fy18osti/67399.pdf
'''
return np.ceil(snow_coverage * num_strings) / num_strings
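# Hedged usage sketch (not part of the pvlib source): chaining the functions
# above on a tiny hourly series; the numbers are made up for illustration only.
if __name__ == '__main__':
    times = pd.date_range('2019-01-01', periods=4, freq='1h')
    snowfall = pd.Series([2.0, 0.0, 0.0, 0.0], index=times)   # cm per interval
    poa = pd.Series([0., 150., 300., 400.], index=times)      # W/m^2
    temp_air = pd.Series([-2., 0., 2., 4.], index=times)      # deg C
    coverage = coverage_nrel(snowfall, poa, temp_air, surface_tilt=30)
    print(dc_loss_nrel(coverage, num_strings=3))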
|
chandler14362/antlr4
|
refs/heads/master
|
runtime/Python3/src/antlr4/atn/ParserATNSimulator.py
|
4
|
#
# Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
#
# The embodiment of the adaptive LL(*), ALL(*), parsing strategy.
#
# <p>
# The basic complexity of the adaptive strategy makes it harder to understand.
# We begin with ATN simulation to build paths in a DFA. Subsequent prediction
# requests go through the DFA first. If they reach a state without an edge for
# the current symbol, the algorithm fails over to the ATN simulation to
# complete the DFA path for the current input (until it finds a conflict state
# or uniquely predicting state).</p>
#
# <p>
# All of that is done without using the outer context because we want to create
# a DFA that is not dependent upon the rule invocation stack when we do a
# prediction. One DFA works in all contexts. We avoid using context not
# necessarily because it's slower, although it can be, but because of the DFA
# caching problem. The closure routine only considers the rule invocation stack
# created during prediction beginning in the decision rule. For example, if
# prediction occurs without invoking another rule's ATN, there are no context
# stacks in the configurations. When lack of context leads to a conflict, we
# don't know if it's an ambiguity or a weakness in the strong LL(*) parsing
# strategy (versus full LL(*)).</p>
#
# <p>
# When SLL yields a configuration set with conflict, we rewind the input and
# retry the ATN simulation, this time using full outer context without adding
# to the DFA. Configuration context stacks will be the full invocation stacks
# from the start rule. If we get a conflict using full context, then we can
# definitively say we have a true ambiguity for that input sequence. If we
# don't get a conflict, it implies that the decision is sensitive to the outer
# context. (It is not context-sensitive in the sense of context-sensitive
# grammars.)</p>
#
# <p>
# The next time we reach this DFA state with an SLL conflict, through DFA
# simulation, we will again retry the ATN simulation using full context mode.
# This is slow because we can't save the results and have to "interpret" the
# ATN each time we get that input.</p>
#
# <p>
# <strong>CACHING FULL CONTEXT PREDICTIONS</strong></p>
#
# <p>
# We could cache results from full context to predicted alternative easily and
# that saves a lot of time but doesn't work in presence of predicates. The set
# of visible predicates from the ATN start state changes depending on the
# context, because closure can fall off the end of a rule. I tried to cache
# tuples (stack context, semantic context, predicted alt) but it was slower
# than interpreting and much more complicated. Also required a huge amount of
# memory. The goal is not to create the world's fastest parser anyway. I'd like
# to keep this algorithm simple. By launching multiple threads, we can improve
# the speed of parsing across a large number of files.</p>
#
# <p>
# There is no strict ordering between the amount of input used by SLL vs LL,
# which makes it really hard to build a cache for full context. Let's say that
# we have input A B C that leads to an SLL conflict with full context X. That
# implies that using X we might only use A B but we could also use A B C D to
# resolve conflict. Input A B C D could predict alternative 1 in one position
# in the input and A B C E could predict alternative 2 in another position in
# input. The conflicting SLL configurations could still be non-unique in the
# full context prediction, which would lead us to requiring more input than the
# original A B C. To make a prediction cache work, we have to track the exact
# input used during the previous prediction. That amounts to a cache that maps
# X to a specific DFA for that context.</p>
#
# <p>
# Something should be done for left-recursive expression predictions. They are
# likely LL(1) + pred eval. Easier to do the whole SLL unless error and retry
# with full LL thing Sam does.</p>
#
# <p>
# <strong>AVOIDING FULL CONTEXT PREDICTION</strong></p>
#
# <p>
# We avoid doing full context retry when the outer context is empty, we did not
# dip into the outer context by falling off the end of the decision state rule,
# or when we force SLL mode.</p>
#
# <p>
# As an example of the not-dip-into-outer-context case, consider super
# constructor calls versus function calls. One grammar might look like
# this:</p>
#
# <pre>
# ctorBody
# : '{' superCall? stat* '}'
# ;
# </pre>
#
# <p>
# Or, you might see something like</p>
#
# <pre>
# stat
# : superCall ';'
# | expression ';'
# | ...
# ;
# </pre>
#
# <p>
# In both cases I believe that no closure operations will dip into the outer
# context. In the first case ctorBody in the worst case will stop at the '}'.
# In the 2nd case it should stop at the ';'. Both cases should stay within the
# entry rule and not dip into the outer context.</p>
#
# <p>
# <strong>PREDICATES</strong></p>
#
# <p>
# Predicates are always evaluated if present in either SLL or LL both. SLL and
# LL simulation deals with predicates differently. SLL collects predicates as
# it performs closure operations like ANTLR v3 did. It delays predicate
# evaluation until it reaches an accept state. This allows us to cache the SLL
# ATN simulation whereas, if we had evaluated predicates on-the-fly during
# closure, the DFA state configuration sets would be different and we couldn't
# build up a suitable DFA.</p>
#
# <p>
# When building a DFA accept state during ATN simulation, we evaluate any
# predicates and return the sole semantically valid alternative. If there is
# more than 1 alternative, we report an ambiguity. If there are 0 alternatives,
# we throw an exception. Alternatives without predicates act like they have
# true predicates. The simple way to think about it is to strip away all
# alternatives with false predicates and choose the minimum alternative that
# remains.</p>
#
# <p>
# When we start in the DFA and reach an accept state that's predicated, we test
# those and return the minimum semantically viable alternative. If no
# alternatives are viable, we throw an exception.</p>
#
# <p>
# During full LL ATN simulation, closure always evaluates predicates
# on-the-fly. This is crucial to reducing the configuration set size during
# closure. It hits a landmine when parsing with the Java grammar, for example,
# without this on-the-fly evaluation.</p>
#
# <p>
# <strong>SHARING DFA</strong></p>
#
# <p>
# All instances of the same parser share the same decision DFAs through a
# static field. Each instance gets its own ATN simulator but they share the
# same {@link #decisionToDFA} field. They also share a
# {@link PredictionContextCache} object that makes sure that all
# {@link PredictionContext} objects are shared among the DFA states. This makes
# a big size difference.</p>
#
# <p>
# <strong>THREAD SAFETY</strong></p>
#
# <p>
# The {@link ParserATNSimulator} locks on the {@link #decisionToDFA} field when
# it adds a new DFA object to that array. {@link #addDFAEdge}
# locks on the DFA for the current decision when setting the
# {@link DFAState#edges} field. {@link #addDFAState} locks on
# the DFA for the current decision when looking up a DFA state to see if it
# already exists. We must make sure that all requests to add DFA states that
# are equivalent result in the same shared DFA object. This is because lots of
# threads will be trying to update the DFA at once. The
# {@link #addDFAState} method also locks inside the DFA lock
# but this time on the shared context cache when it rebuilds the
# configurations' {@link PredictionContext} objects using cached
# subgraphs/nodes. No other locking occurs, even during DFA simulation. This is
# safe as long as we can guarantee that all threads referencing
# {@code s.edge[t]} get the same physical target {@link DFAState}, or
# {@code null}. Once into the DFA, the DFA simulation does not reference the
# {@link DFA#states} map. It follows the {@link DFAState#edges} field to new
# targets. The DFA simulator will either find {@link DFAState#edges} to be
# {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or
# {@code dfa.edges[t]} to be non-null. The
# {@link #addDFAEdge} method could be racing to set the field
# but in either case the DFA simulator works; if {@code null}, and requests ATN
# simulation. It could also race trying to get {@code dfa.edges[t]}, but either
# way it will work because it's not doing a test and set operation.</p>
#
# <p>
# <strong>Starting with SLL then failing to combined SLL/LL (Two-Stage
# Parsing)</strong></p>
#
# <p>
# Sam pointed out that if SLL does not give a syntax error, then there is no
# point in doing full LL, which is slower. We only have to try LL if we get a
# syntax error. For maximum speed, Sam starts the parser set to pure SLL
# mode with the {@link BailErrorStrategy}:</p>
#
# <pre>
# parser.{@link Parser#getInterpreter() getInterpreter()}.{@link #setPredictionMode setPredictionMode}{@code (}{@link PredictionMode#SLL}{@code )};
# parser.{@link Parser#setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}());
# </pre>
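#
# <p>
# (Illustrative aside, not part of the original comment: a rough equivalent
# with this Python runtime, assuming the generated parser exposes the usual
# {@code _interp} and {@code _errHandler} attributes, would be)</p>
#
# <pre>
# parser._interp.predictionMode = PredictionMode.SLL
# parser._errHandler = BailErrorStrategy()
# </pre>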
#
# <p>
# If it does not get a syntax error, then we're done. If it does get a syntax
# error, we need to retry with the combined SLL/LL strategy.</p>
#
# <p>
# The reason this works is as follows. If there are no SLL conflicts, then the
# grammar is SLL (at least for that input set). If there is an SLL conflict,
# the full LL analysis must yield a set of viable alternatives which is a
# subset of the alternatives reported by SLL. If the LL set is a singleton,
# then the grammar is LL but not SLL. If the LL set is the same size as the SLL
# set, the decision is SLL. If the LL set has size > 1, then that decision
# is truly ambiguous on the current input. If the LL set is smaller, then the
# SLL conflict resolution might choose an alternative that the full LL would
# rule out as a possibility based upon better context information. If that's
# the case, then the SLL parse will definitely get an error because the full LL
# analysis says it's not viable. If SLL conflict resolution chooses an
# alternative within the LL set, then both SLL and LL would choose the same
# alternative because they both choose the minimum of multiple conflicting
# alternatives.</p>
#
# <p>
# Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and
# a smaller LL set called <em>s</em>. If <em>s</em> is {@code {2, 3}}, then SLL
# parsing will get an error because SLL will pursue alternative 1. If
# <em>s</em> is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will
# choose the same alternative because alternative one is the minimum of either
# set. If <em>s</em> is {@code {2}} or {@code {3}} then SLL will get a syntax
# error. If <em>s</em> is {@code {1}} then SLL will succeed.</p>
#
# <p>
# Of course, if the input is invalid, then we will get an error for sure in
# both SLL and LL parsing. Erroneous input will therefore require 2 passes over
# the input.</p>
#
import sys
from antlr4 import DFA
from antlr4.PredictionContext import PredictionContextCache, PredictionContext, SingletonPredictionContext, \
PredictionContextFromRuleContext
from antlr4.BufferedTokenStream import TokenStream
from antlr4.Parser import Parser
from antlr4.ParserRuleContext import ParserRuleContext
from antlr4.RuleContext import RuleContext
from antlr4.Token import Token
from antlr4.Utils import str_list
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNConfig import ATNConfig
from antlr4.atn.ATNConfigSet import ATNConfigSet
from antlr4.atn.ATNSimulator import ATNSimulator
from antlr4.atn.ATNState import StarLoopEntryState, DecisionState, RuleStopState, ATNState
from antlr4.atn.PredictionMode import PredictionMode
from antlr4.atn.SemanticContext import SemanticContext, AND, andContext, orContext
from antlr4.atn.Transition import Transition, RuleTransition, ActionTransition, PrecedencePredicateTransition, \
PredicateTransition, AtomTransition, SetTransition, NotSetTransition
from antlr4.dfa.DFAState import DFAState, PredPrediction
from antlr4.error.Errors import NoViableAltException
class ParserATNSimulator(ATNSimulator):
debug = False
debug_list_atn_decisions = False
dfa_debug = False
retry_debug = False
def __init__(self, parser:Parser, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache):
super().__init__(atn, sharedContextCache)
self.parser = parser
self.decisionToDFA = decisionToDFA
# SLL, LL, or LL + exact ambig detection?#
self.predictionMode = PredictionMode.LL
# LAME globals to avoid parameters!!!!! I need these down deep in predTransition
self._input = None
self._startIndex = 0
self._outerContext = None
self._dfa = None
# Each prediction operation uses a cache for merge of prediction contexts.
# Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
# isn't synchronized but we're ok since two threads shouldn't reuse same
# parser/atnsim object because it can only handle one input at a time.
# This maps graphs a and b to merged result c. (a,b)→c. We can avoid
# the merge if we ever see a and b again. Note that (b,a)→c should
# also be examined during cache lookup.
#
self.mergeCache = None
def reset(self):
pass
def adaptivePredict(self, input:TokenStream, decision:int, outerContext:ParserRuleContext):
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("adaptivePredict decision " + str(decision) +
" exec LA(1)==" + self.getLookaheadName(input) +
" line " + str(input.LT(1).line) + ":" +
str(input.LT(1).column))
self._input = input
self._startIndex = input.index
self._outerContext = outerContext
dfa = self.decisionToDFA[decision]
self._dfa = dfa
m = input.mark()
index = input.index
# Now we are certain to have a specific decision's DFA
# But, do we still need an initial state?
try:
if dfa.precedenceDfa:
# the start state for a precedence DFA depends on the current
# parser precedence, and is provided by a DFA method.
s0 = dfa.getPrecedenceStartState(self.parser.getPrecedence())
else:
# the start state for a "regular" DFA is just s0
s0 = dfa.s0
if s0 is None:
if outerContext is None:
outerContext = ParserRuleContext.EMPTY
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("predictATN decision " + str(dfa.decision) +
" exec LA(1)==" + self.getLookaheadName(input) +
", outerContext=" + outerContext.toString(self.parser.literalNames, None))
fullCtx = False
s0_closure = self.computeStartState(dfa.atnStartState, ParserRuleContext.EMPTY, fullCtx)
if dfa.precedenceDfa:
# If this is a precedence DFA, we use applyPrecedenceFilter
# to convert the computed start state to a precedence start
# state. We then use DFA.setPrecedenceStartState to set the
# appropriate start state for the precedence level rather
# than simply setting DFA.s0.
#
dfa.s0.configs = s0_closure # not used for prediction but useful to know start configs anyway
s0_closure = self.applyPrecedenceFilter(s0_closure)
s0 = self.addDFAState(dfa, DFAState(configs=s0_closure))
dfa.setPrecedenceStartState(self.parser.getPrecedence(), s0)
else:
s0 = self.addDFAState(dfa, DFAState(configs=s0_closure))
dfa.s0 = s0
alt = self.execATN(dfa, s0, input, index, outerContext)
if ParserATNSimulator.debug:
print("DFA after predictATN: " + dfa.toString(self.parser.literalNames))
return alt
finally:
self._dfa = None
self.mergeCache = None # wack cache after each prediction
input.seek(index)
input.release(m)
# Performs ATN simulation to compute a predicted alternative based
# upon the remaining input, but also updates the DFA cache to avoid
# having to traverse the ATN again for the same input sequence.
# There are some key conditions we're looking for after computing a new
# set of ATN configs (proposed DFA state):
# if the set is empty, there is no viable alternative for current symbol
# does the state uniquely predict an alternative?
# does the state have a conflict that would prevent us from
# putting it on the work list?
# We also have some key operations to do:
# add an edge from previous DFA state to potentially new DFA state, D,
# upon current symbol but only if adding to work list, which means in all
# cases except no viable alternative (and possibly non-greedy decisions?)
# collecting predicates and adding semantic context to DFA accept states
# adding rule context to context-sensitive DFA accept states
# consuming an input symbol
# reporting a conflict
# reporting an ambiguity
# reporting a context sensitivity
# reporting insufficient predicates
# cover these cases:
# dead end
# single alt
# single alt + preds
# conflict
# conflict + preds
#
def execATN(self, dfa:DFA, s0:DFAState, input:TokenStream, startIndex:int, outerContext:ParserRuleContext ):
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("execATN decision " + str(dfa.decision) +
" exec LA(1)==" + self.getLookaheadName(input) +
" line " + str(input.LT(1).line) + ":" + str(input.LT(1).column))
previousD = s0
if ParserATNSimulator.debug:
print("s0 = " + str(s0))
t = input.LA(1)
while True: # while more work
D = self.getExistingTargetState(previousD, t)
if D is None:
D = self.computeTargetState(dfa, previousD, t)
if D is self.ERROR:
# if any configs in previous dipped into outer context, that
# means that input up to t actually finished entry rule
# at least for SLL decision. Full LL doesn't dip into outer
# so don't need special case.
# We will get an error no matter what so delay until after
# decision; better error message. Also, no reachable target
# ATN states in SLL implies LL will also get nowhere.
# If conflict in states that dip out, choose min since we
# will get error no matter what.
e = self.noViableAlt(input, outerContext, previousD.configs, startIndex)
input.seek(startIndex)
alt = self.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
if alt!=ATN.INVALID_ALT_NUMBER:
return alt
raise e
if D.requiresFullContext and self.predictionMode != PredictionMode.SLL:
# IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
conflictingAlts = D.configs.conflictingAlts
if D.predicates is not None:
if ParserATNSimulator.debug:
print("DFA state has preds in DFA sim LL failover")
conflictIndex = input.index
if conflictIndex != startIndex:
input.seek(startIndex)
conflictingAlts = self.evalSemanticContext(D.predicates, outerContext, True)
if len(conflictingAlts)==1:
if ParserATNSimulator.debug:
print("Full LL avoided")
return min(conflictingAlts)
if conflictIndex != startIndex:
# restore the index so reporting the fallback to full
# context occurs with the index at the correct spot
input.seek(conflictIndex)
if ParserATNSimulator.dfa_debug:
print("ctx sensitive state " + str(outerContext) +" in " + str(D))
fullCtx = True
s0_closure = self.computeStartState(dfa.atnStartState, outerContext, fullCtx)
self.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index)
alt = self.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext)
return alt
if D.isAcceptState:
if D.predicates is None:
return D.prediction
stopIndex = input.index
input.seek(startIndex)
alts = self.evalSemanticContext(D.predicates, outerContext, True)
if len(alts)==0:
raise self.noViableAlt(input, outerContext, D.configs, startIndex)
elif len(alts)==1:
return min(alts)
else:
# report ambiguity after predicate evaluation to make sure the correct
# set of ambig alts is reported.
self.reportAmbiguity(dfa, D, startIndex, stopIndex, False, alts, D.configs)
return min(alts)
previousD = D
if t != Token.EOF:
input.consume()
t = input.LA(1)
#
# Get an existing target state for an edge in the DFA. If the target state
# for the edge has not yet been computed or is otherwise not available,
# this method returns {@code null}.
#
# @param previousD The current DFA state
# @param t The next input symbol
# @return The existing target DFA state for the given input symbol
# {@code t}, or {@code null} if the target state for this edge is not
# already cached
#
def getExistingTargetState(self, previousD:DFAState, t:int):
edges = previousD.edges
if edges is None or t + 1 < 0 or t + 1 >= len(edges):
return None
else:
return edges[t + 1]
#
# Compute a target state for an edge in the DFA, and attempt to add the
# computed state and corresponding edge to the DFA.
#
# @param dfa The DFA
# @param previousD The current DFA state
# @param t The next input symbol
#
# @return The computed target DFA state for the given input symbol
# {@code t}. If {@code t} does not lead to a valid DFA state, this method
# returns {@link #ERROR}.
#
def computeTargetState(self, dfa:DFA, previousD:DFAState, t:int):
reach = self.computeReachSet(previousD.configs, t, False)
if reach is None:
self.addDFAEdge(dfa, previousD, t, self.ERROR)
return self.ERROR
# create new target state; we'll add to DFA after it's complete
D = DFAState(configs=reach)
predictedAlt = self.getUniqueAlt(reach)
if ParserATNSimulator.debug:
altSubSets = PredictionMode.getConflictingAltSubsets(reach)
print("SLL altSubSets=" + str(altSubSets) + ", configs=" + str(reach) +
", predict=" + str(predictedAlt) + ", allSubsetsConflict=" +
str(PredictionMode.allSubsetsConflict(altSubSets)) + ", conflictingAlts=" +
str(self.getConflictingAlts(reach)))
if predictedAlt!=ATN.INVALID_ALT_NUMBER:
# NO CONFLICT, UNIQUELY PREDICTED ALT
D.isAcceptState = True
D.configs.uniqueAlt = predictedAlt
D.prediction = predictedAlt
elif PredictionMode.hasSLLConflictTerminatingPrediction(self.predictionMode, reach):
# MORE THAN ONE VIABLE ALTERNATIVE
D.configs.conflictingAlts = self.getConflictingAlts(reach)
D.requiresFullContext = True
# in SLL-only mode, we will stop at this state and return the minimum alt
D.isAcceptState = True
D.prediction = min(D.configs.conflictingAlts)
if D.isAcceptState and D.configs.hasSemanticContext:
self.predicateDFAState(D, self.atn.getDecisionState(dfa.decision))
if D.predicates is not None:
D.prediction = ATN.INVALID_ALT_NUMBER
# all adds to dfa are done after we've created full D state
D = self.addDFAEdge(dfa, previousD, t, D)
return D
def predicateDFAState(self, dfaState:DFAState, decisionState:DecisionState):
# We need to test all predicates, even in DFA states that
# uniquely predict an alternative.
nalts = len(decisionState.transitions)
# Update DFA so reach becomes accept state with (predicate,alt)
# pairs if preds found for conflicting alts
altsToCollectPredsFrom = self.getConflictingAltsOrUniqueAlt(dfaState.configs)
altToPred = self.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
if altToPred is not None:
dfaState.predicates = self.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
dfaState.prediction = ATN.INVALID_ALT_NUMBER # make sure we use preds
else:
# There are preds in configs but they might go away
# when OR'd together like {p}? || NONE == NONE. If neither
# alt has preds, resolve to min alt
dfaState.prediction = min(altsToCollectPredsFrom)
# comes back with reach.uniqueAlt set to a valid alt
def execATNWithFullContext(self, dfa:DFA, D:DFAState, # how far we got before failing over
s0:ATNConfigSet,
input:TokenStream,
startIndex:int,
outerContext:ParserRuleContext):
if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
print("execATNWithFullContext", str(s0))
fullCtx = True
foundExactAmbig = False
reach = None
previous = s0
input.seek(startIndex)
t = input.LA(1)
predictedAlt = -1
while (True): # while more work
reach = self.computeReachSet(previous, t, fullCtx)
if reach is None:
# if any configs in previous dipped into outer context, that
# means that input up to t actually finished entry rule
# at least for LL decision. Full LL doesn't dip into outer
# so don't need special case.
# We will get an error no matter what so delay until after
# decision; better error message. Also, no reachable target
# ATN states in SLL implies LL will also get nowhere.
# If conflict in states that dip out, choose min since we
# will get error no matter what.
e = self.noViableAlt(input, outerContext, previous, startIndex)
input.seek(startIndex)
alt = self.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
if alt!=ATN.INVALID_ALT_NUMBER:
return alt
else:
raise e
altSubSets = PredictionMode.getConflictingAltSubsets(reach)
if ParserATNSimulator.debug:
print("LL altSubSets=" + str(altSubSets) + ", predict=" +
str(PredictionMode.getUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
str(PredictionMode.resolvesToJustOneViableAlt(altSubSets)))
reach.uniqueAlt = self.getUniqueAlt(reach)
# unique prediction?
if reach.uniqueAlt!=ATN.INVALID_ALT_NUMBER:
predictedAlt = reach.uniqueAlt
break
elif self.predictionMode is not PredictionMode.LL_EXACT_AMBIG_DETECTION:
predictedAlt = PredictionMode.resolvesToJustOneViableAlt(altSubSets)
if predictedAlt != ATN.INVALID_ALT_NUMBER:
break
else:
# In exact ambiguity mode, we never try to terminate early.
# Just keep consuming input until we know what the conflict is.
if PredictionMode.allSubsetsConflict(altSubSets) and PredictionMode.allSubsetsEqual(altSubSets):
foundExactAmbig = True
predictedAlt = PredictionMode.getSingleViableAlt(altSubSets)
break
# else there are multiple non-conflicting subsets or
# we're not sure what the ambiguity is yet.
# So, keep going.
previous = reach
if t != Token.EOF:
input.consume()
t = input.LA(1)
# If the configuration set uniquely predicts an alternative,
# without conflict, then we know that it's a full LL decision
# not SLL.
if reach.uniqueAlt != ATN.INVALID_ALT_NUMBER :
self.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index)
return predictedAlt
# We do not check predicates here because we have checked them
# on-the-fly when doing full context prediction.
#
# In non-exact ambiguity detection mode, we might actually be able to
# detect an exact ambiguity, but I'm not going to spend the cycles
# needed to check. We only emit ambiguity warnings in exact ambiguity
# mode.
#
# For example, we might know that we have conflicting configurations.
# But, that does not mean that there is no way forward without a
# conflict. It's possible to have nonconflicting alt subsets as in:
# altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
# from
#
# [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
# (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
#
# In this case, (17,1,[5 $]) indicates there is some next sequence that
# would resolve this without conflict to alternative 1. Any other viable
# next sequence, however, is associated with a conflict. We stop
# looking for input because no amount of further lookahead will alter
# the fact that we should predict alternative 1. We just can't say for
# sure that there is an ambiguity without looking further.
self.reportAmbiguity(dfa, D, startIndex, input.index, foundExactAmbig, None, reach)
return predictedAlt
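#
# Compute the set of ATN configurations reachable from the given closure
# on input symbol t. Configurations already in a rule stop state are
# handled separately via skippedStopStates (see comments below). Returns
# None when no configuration can reach a target on t.
#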
def computeReachSet(self, closure:ATNConfigSet, t:int, fullCtx:bool):
if ParserATNSimulator.debug:
print("in computeReachSet, starting closure: " + str(closure))
if self.mergeCache is None:
self.mergeCache = dict()
intermediate = ATNConfigSet(fullCtx)
# Configurations already in a rule stop state indicate reaching the end
# of the decision rule (local context) or end of the start rule (full
# context). Once reached, these configurations are never updated by a
# closure operation, so they are handled separately for the performance
# advantage of having a smaller intermediate set when calling closure.
#
# For full-context reach operations, separate handling is required to
# ensure that the alternative matching the longest overall sequence is
# chosen when multiple such configurations can match the input.
skippedStopStates = None
# First figure out where we can reach on input t
for c in closure:
if ParserATNSimulator.debug:
print("testing " + self.getTokenName(t) + " at " + str(c))
if isinstance(c.state, RuleStopState):
if fullCtx or t == Token.EOF:
if skippedStopStates is None:
skippedStopStates = list()
skippedStopStates.append(c)
continue
for trans in c.state.transitions:
target = self.getReachableTarget(trans, t)
if target is not None:
intermediate.add(ATNConfig(state=target, config=c), self.mergeCache)
# Now figure out where the reach operation can take us...
reach = None
# This block optimizes the reach operation for intermediate sets which
# trivially indicate a termination state for the overall
# adaptivePredict operation.
#
# The conditions assume that intermediate
# contains all configurations relevant to the reach set, but this
# condition is not true when one or more configurations have been
# withheld in skippedStopStates, or when the current symbol is EOF.
#
if skippedStopStates is None and t!=Token.EOF:
if len(intermediate)==1:
# Don't pursue the closure if there is just one state.
# It can only have one alternative; just add to result
# Also don't pursue the closure if there is unique alternative
# among the configurations.
reach = intermediate
elif self.getUniqueAlt(intermediate)!=ATN.INVALID_ALT_NUMBER:
# Also don't pursue the closure if there is unique alternative
# among the configurations.
reach = intermediate
# If the reach set could not be trivially determined, perform a closure
# operation on the intermediate set to compute its initial value.
#
if reach is None:
reach = ATNConfigSet(fullCtx)
closureBusy = set()
treatEofAsEpsilon = t == Token.EOF
for c in intermediate:
self.closure(c, reach, closureBusy, False, fullCtx, treatEofAsEpsilon)
if t == Token.EOF:
# After consuming EOF no additional input is possible, so we are
# only interested in configurations which reached the end of the
# decision rule (local context) or end of the start rule (full
# context). Update reach to contain only these configurations. This
# handles both explicit EOF transitions in the grammar and implicit
# EOF transitions following the end of the decision or start rule.
#
# When reach==intermediate, no closure operation was performed. In
# this case, removeAllConfigsNotInRuleStopState needs to check for
# reachable rule stop states as well as configurations already in
# a rule stop state.
#
# This is handled before the configurations in skippedStopStates,
# because any configurations potentially added from that list are
# already guaranteed to meet this condition whether or not it's
# required.
#
reach = self.removeAllConfigsNotInRuleStopState(reach, reach is intermediate)
# If skippedStopStates is not null, then it contains at least one
# configuration. For full-context reach operations, these
# configurations reached the end of the start rule, in which case we
# only add them back to reach if no configuration during the current
# closure operation reached such a state. This ensures adaptivePredict
# chooses an alternative matching the longest overall sequence when
# multiple alternatives are viable.
#
if skippedStopStates is not None and ( (not fullCtx) or (not PredictionMode.hasConfigInRuleStopState(reach))):
for c in skippedStopStates:
reach.add(c, self.mergeCache)
if len(reach)==0:
return None
else:
return reach
#
# Return a configuration set containing only the configurations from
# {@code configs} which are in a {@link RuleStopState}. If all
# configurations in {@code configs} are already in a rule stop state, this
# method simply returns {@code configs}.
#
# <p>When {@code lookToEndOfRule} is true, this method uses
# {@link ATN#nextTokens} for each configuration in {@code configs} which is
# not already in a rule stop state to see if a rule stop state is reachable
# from the configuration via epsilon-only transitions.</p>
#
# @param configs the configuration set to update
# @param lookToEndOfRule when true, this method checks for rule stop states
# reachable by epsilon-only transitions from each configuration in
# {@code configs}.
#
# @return {@code configs} if all configurations in {@code configs} are in a
# rule stop state, otherwise return a new configuration set containing only
# the configurations from {@code configs} which are in a rule stop state
#
def removeAllConfigsNotInRuleStopState(self, configs:ATNConfigSet, lookToEndOfRule:bool):
if PredictionMode.allConfigsInRuleStopStates(configs):
return configs
result = ATNConfigSet(configs.fullCtx)
for config in configs:
if isinstance(config.state, RuleStopState):
result.add(config, self.mergeCache)
continue
if lookToEndOfRule and config.state.epsilonOnlyTransitions:
nextTokens = self.atn.nextTokens(config.state)
if Token.EPSILON in nextTokens:
endOfRuleState = self.atn.ruleToStopState[config.state.ruleIndex]
result.add(ATNConfig(state=endOfRuleState, config=config), self.mergeCache)
return result
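#
# Seed prediction with one configuration per alternative of decision
# state p, each closed over epsilon transitions, using ctx to build the
# initial prediction context.
#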
def computeStartState(self, p:ATNState, ctx:RuleContext, fullCtx:bool):
# always at least the implicit call to start rule
initialContext = PredictionContextFromRuleContext(self.atn, ctx)
configs = ATNConfigSet(fullCtx)
for i in range(0, len(p.transitions)):
target = p.transitions[i].target
c = ATNConfig(target, i+1, initialContext)
closureBusy = set()
self.closure(c, configs, closureBusy, True, fullCtx, False)
return configs
#
# This method transforms the start state computed by
# {@link #computeStartState} to the special start state used by a
# precedence DFA for a particular precedence value. The transformation
# process applies the following changes to the start state's configuration
# set.
#
# <ol>
# <li>Evaluate the precedence predicates for each configuration using
# {@link SemanticContext#evalPrecedence}.</li>
# <li>Remove all configurations which predict an alternative greater than
# 1, for which another configuration that predicts alternative 1 is in the
# same ATN state with the same prediction context. This transformation is
# valid for the following reasons:
# <ul>
# <li>The closure block cannot contain any epsilon transitions which bypass
# the body of the closure, so all states reachable via alternative 1 are
# part of the precedence alternatives of the transformed left-recursive
# rule.</li>
# <li>The "primary" portion of a left recursive rule cannot contain an
# epsilon transition, so the only way an alternative other than 1 can exist
# in a state that is also reachable via alternative 1 is by nesting calls
# to the left-recursive rule, with the outer calls not being at the
# preferred precedence level.</li>
# </ul>
# </li>
# </ol>
#
# <p>
# The prediction context must be considered by this filter to address
# situations like the following.
# </p>
# <code>
# <pre>
# grammar TA;
# prog: statement* EOF;
# statement: letterA | statement letterA 'b' ;
# letterA: 'a';
# </pre>
# </code>
# <p>
# In the above grammar, the ATN state immediately before the token
# reference {@code 'a'} in {@code letterA} is reachable from the left edge
# of both the primary and closure blocks of the left-recursive rule
# {@code statement}. The prediction context associated with each of these
# configurations distinguishes between them, and prevents the alternative
# which stepped out to {@code prog} (and then back in to {@code statement})
# from being eliminated by the filter.
# </p>
#
# @param configs The configuration set computed by
# {@link #computeStartState} as the start state for the DFA.
# @return The transformed configuration set representing the start state
# for a precedence DFA at a particular precedence level (determined by
# calling {@link Parser#getPrecedence}).
#
def applyPrecedenceFilter(self, configs:ATNConfigSet):
statesFromAlt1 = dict()
configSet = ATNConfigSet(configs.fullCtx)
for config in configs:
# handle alt 1 first
if config.alt != 1:
continue
updatedContext = config.semanticContext.evalPrecedence(self.parser, self._outerContext)
if updatedContext is None:
# the configuration was eliminated
continue
statesFromAlt1[config.state.stateNumber] = config.context
if updatedContext is not config.semanticContext:
configSet.add(ATNConfig(config=config, semantic=updatedContext), self.mergeCache)
else:
configSet.add(config, self.mergeCache)
for config in configs:
if config.alt == 1:
# already handled
continue
# In the future, this elimination step could be updated to also
# filter the prediction context for alternatives predicting alt>1
# (basically a graph subtraction algorithm).
#
if not config.precedenceFilterSuppressed:
context = statesFromAlt1.get(config.state.stateNumber, None)
if context==config.context:
# eliminated
continue
configSet.add(config, self.mergeCache)
return configSet
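# Return the target state of the transition if it matches the given token
# type, otherwise None.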
def getReachableTarget(self, trans:Transition, ttype:int):
if trans.matches(ttype, 0, self.atn.maxTokenType):
return trans.target
else:
return None
def getPredsForAmbigAlts(self, ambigAlts:set, configs:ATNConfigSet, nalts:int):
# REACH=[1|1|[]|0:0, 1|2|[]|0:1]
# altToPred starts as an array of all null contexts. The entry at index i
# corresponds to alternative i. altToPred[i] may have one of three values:
# 1. null: no ATNConfig c is found such that c.alt==i
# 2. SemanticContext.NONE: At least one ATNConfig c exists such that
# c.alt==i and c.semanticContext==SemanticContext.NONE. In other words,
# alt i has at least one unpredicated config.
# 3. Non-NONE Semantic Context: There exists at least one, and for all
# ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE.
#
# From this, it is clear that NONE||anything==NONE.
#
altToPred = [None] * (nalts + 1)
for c in configs:
if c.alt in ambigAlts:
altToPred[c.alt] = orContext(altToPred[c.alt], c.semanticContext)
nPredAlts = 0
for i in range(1, nalts+1):
if altToPred[i] is None:
altToPred[i] = SemanticContext.NONE
elif altToPred[i] is not SemanticContext.NONE:
nPredAlts += 1
# nonambig alts are null in altToPred
if nPredAlts==0:
altToPred = None
if ParserATNSimulator.debug:
print("getPredsForAmbigAlts result " + str_list(altToPred))
return altToPred
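# Pair each ambiguous alternative with its semantic context as a
# PredPrediction. Returns None when no alternative actually carries a
# predicate, so callers can skip predicate evaluation entirely.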
def getPredicatePredictions(self, ambigAlts:set, altToPred:list):
pairs = []
containsPredicate = False
for i in range(1, len(altToPred)):
pred = altToPred[i]
# unpredicated is indicated by SemanticContext.NONE
if ambigAlts is not None and i in ambigAlts:
pairs.append(PredPrediction(pred, i))
if pred is not SemanticContext.NONE:
containsPredicate = True
if not containsPredicate:
return None
return pairs
#
# This method is used to improve the localization of error messages by
# choosing an alternative rather than throwing a
# {@link NoViableAltException} in particular prediction scenarios where the
# {@link #ERROR} state was reached during ATN simulation.
#
# <p>
# The default implementation of this method uses the following
# algorithm to identify an ATN configuration which successfully parsed the
# decision entry rule. Choosing such an alternative ensures that the
# {@link ParserRuleContext} returned by the calling rule will be complete
# and valid, and the syntax error will be reported later at a more
# localized location.</p>
#
# <ul>
# <li>If a syntactically valid path or paths reach the end of the decision rule and
# they are semantically valid if predicated, return the min associated alt.</li>
# <li>Else, if one or more semantically invalid but syntactically valid
# paths exist, return the minimum associated alt.
# </li>
# <li>Otherwise, return {@link ATN#INVALID_ALT_NUMBER}.</li>
# </ul>
#
# <p>
# In some scenarios, the algorithm described above could predict an
# alternative which will result in a {@link FailedPredicateException} in
# the parser. Specifically, this could occur if the <em>only</em> configuration
# capable of successfully parsing to the end of the decision rule is
# blocked by a semantic predicate. By choosing this alternative within
# {@link #adaptivePredict} instead of throwing a
# {@link NoViableAltException}, the resulting
# {@link FailedPredicateException} in the parser will identify the specific
# predicate which is preventing the parser from successfully parsing the
# decision rule, which helps developers identify and correct logic errors
# in semantic predicates.
# </p>
#
# @param configs The ATN configurations which were valid immediately before
# the {@link #ERROR} state was reached
# @param outerContext The \gamma_0 initial parser context from the paper
# or the parser stack at the instant before prediction commences.
#
# @return The value to return from {@link #adaptivePredict}, or
# {@link ATN#INVALID_ALT_NUMBER} if a suitable alternative was not
# identified and {@link #adaptivePredict} should report an error instead.
#
def getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(self, configs:ATNConfigSet, outerContext:ParserRuleContext):
semValidConfigs, semInvalidConfigs = self.splitAccordingToSemanticValidity(configs, outerContext)
alt = self.getAltThatFinishedDecisionEntryRule(semValidConfigs)
if alt!=ATN.INVALID_ALT_NUMBER: # semantically/syntactically viable path exists
return alt
# Is there a syntactically valid path with a failed pred?
if len(semInvalidConfigs)>0:
alt = self.getAltThatFinishedDecisionEntryRule(semInvalidConfigs)
if alt!=ATN.INVALID_ALT_NUMBER: # syntactically viable path exists
return alt
return ATN.INVALID_ALT_NUMBER
def getAltThatFinishedDecisionEntryRule(self, configs:ATNConfigSet):
alts = set()
for c in configs:
if c.reachesIntoOuterContext>0 or (isinstance(c.state, RuleStopState) and c.context.hasEmptyPath() ):
alts.add(c.alt)
if len(alts)==0:
return ATN.INVALID_ALT_NUMBER
else:
return min(alts)
# Walk the list of configurations and split them according to
# those that have preds evaluating to true/false. If no pred, assume
# true pred and include in succeeded set. Returns Pair of sets.
#
# Create a new set so as not to alter the incoming parameter.
#
# Assumption: the input stream has been restored to the starting point for
# prediction, which is where predicates need to be evaluated.
#
def splitAccordingToSemanticValidity(self, configs:ATNConfigSet, outerContext:ParserRuleContext):
succeeded = ATNConfigSet(configs.fullCtx)
failed = ATNConfigSet(configs.fullCtx)
for c in configs:
if c.semanticContext is not SemanticContext.NONE:
predicateEvaluationResult = c.semanticContext.eval(self.parser, outerContext)
if predicateEvaluationResult:
succeeded.add(c)
else:
failed.add(c)
else:
succeeded.add(c)
return (succeeded,failed)
# Look through a list of predicate/alt pairs, returning alts for the
# pairs that win. A {@code NONE} predicate indicates an alt containing an
# unpredicated config which behaves as "always true." If !complete
# then we stop at the first predicate that evaluates to true. This
# includes pairs with null predicates.
#
def evalSemanticContext(self, predPredictions:list, outerContext:ParserRuleContext, complete:bool):
predictions = set()
for pair in predPredictions:
if pair.pred is SemanticContext.NONE:
predictions.add(pair.alt)
if not complete:
break
continue
predicateEvaluationResult = pair.pred.eval(self.parser, outerContext)
if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
print("eval pred " + str(pair) + "=" + str(predicateEvaluationResult))
if predicateEvaluationResult:
if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
print("PREDICT " + str(pair.alt))
predictions.add(pair.alt)
if not complete:
break
return predictions
# TODO: If we are doing predicates, there is no point in pursuing
# closure operations if we reach a DFA state that uniquely predicts
# alternative. We will not be caching that DFA state and it is a
# waste to pursue the closure. Might have to advance when we do
# ambig detection though :(
#
def closure(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, treatEofAsEpsilon:bool):
initialDepth = 0
self.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
fullCtx, initialDepth, treatEofAsEpsilon)
def closureCheckingStopState(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool):
if ParserATNSimulator.debug:
print("closure(" + str(config) + ")")
if isinstance(config.state, RuleStopState):
# We hit rule end. If we have context info, use it
# run thru all possible stack tops in ctx
if not config.context.isEmpty():
for i in range(0, len(config.context)):
state = config.context.getReturnState(i)
if state is PredictionContext.EMPTY_RETURN_STATE:
if fullCtx:
configs.add(ATNConfig(state=config.state, context=PredictionContext.EMPTY, config=config), self.mergeCache)
continue
else:
# we have no context info, just chase follow links (if greedy)
if ParserATNSimulator.debug:
print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
self.closure_(config, configs, closureBusy, collectPredicates,
fullCtx, depth, treatEofAsEpsilon)
continue
returnState = self.atn.states[state]
newContext = config.context.getParent(i) # "pop" return state
c = ATNConfig(state=returnState, alt=config.alt, context=newContext, semantic=config.semanticContext)
# While we have context to pop back from, we may have
# gotten that context AFTER having fallen off a rule.
# Make sure we track that we are now out of context.
c.reachesIntoOuterContext = config.reachesIntoOuterContext
self.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth - 1, treatEofAsEpsilon)
return
elif fullCtx:
# reached end of start rule
configs.add(config, self.mergeCache)
return
else:
# else if we have no context info, just chase follow links (if greedy)
if ParserATNSimulator.debug:
print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
self.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon)
# Do the actual work of walking epsilon edges.
def closure_(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool):
p = config.state
# optimization
if not p.epsilonOnlyTransitions:
configs.add(config, self.mergeCache)
# make sure to not return here, because EOF transitions can act as
# both epsilon transitions and non-epsilon transitions.
first = True
for t in p.transitions:
if first:
first = False
if self.canDropLoopEntryEdgeInLeftRecursiveRule(config):
continue
continueCollecting = collectPredicates and not isinstance(t, ActionTransition)
c = self.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEofAsEpsilon)
if c is not None:
if not t.isEpsilon:
if c in closureBusy:
# avoid infinite recursion for EOF* and EOF+
continue
closureBusy.add(c)
newDepth = depth
if isinstance( config.state, RuleStopState):
# target fell off end of rule; mark resulting c as having dipped into outer context
# We can't get here if incoming config was rule stop and we had context
# track how far we dip into outer context. Might
# come in handy and we avoid evaluating context dependent
# preds if this is > 0.
if c in closureBusy:
# avoid infinite recursion for right-recursive rules
continue
closureBusy.add(c)
if self._dfa is not None and self._dfa.precedenceDfa:
if t.outermostPrecedenceReturn == self._dfa.atnStartState.ruleIndex:
c.precedenceFilterSuppressed = True
c.reachesIntoOuterContext += 1
configs.dipsIntoOuterContext = True # TODO: can remove? only care when we add to set per middle of this method
newDepth -= 1
if ParserATNSimulator.debug:
print("dips into outer ctx: " + str(c))
elif isinstance(t, RuleTransition):
# latch when newDepth goes negative - once we step out of the entry context we can't return
if newDepth >= 0:
newDepth += 1
self.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEofAsEpsilon)
# Implements first-edge (loop entry) elimination as an optimization
# during closure operations. See antlr/antlr4#1398.
#
# The optimization is to avoid adding the loop entry config when
# the exit path can only lead back to the same
# StarLoopEntryState after popping context at the rule end state
# (traversing only epsilon edges, so we're still in closure, in
# this same rule).
#
# We need to detect any state that can reach loop entry on
# epsilon w/o exiting rule. We don't have to look at FOLLOW
# links, just ensure that all stack tops for config refer to key
# states in LR rule.
#
# To verify we are in the right situation we must first check
# closure is at a StarLoopEntryState generated during LR removal.
# Then we check that each stack top of context is a return state
# from one of these cases:
#
# 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state
# 2. expr op expr. The return state is the block end of internal block of (...)*
# 3. 'between' expr 'and' expr. The return state of 2nd expr reference.
# That state points at block end of internal block of (...)*.
# 4. expr '?' expr ':' expr. The return state points at block end,
# which points at loop entry state.
#
# If any is true for each stack top, then closure does not add a
# config to the current config set for edge[0], the loop entry branch.
#
# Conditions fail if any context for the current config is:
#
# a. empty (we'd fall out of expr to do a global FOLLOW which could
# even be to some weird spot in expr) or,
# b. lies outside of expr or,
# c. lies within expr but at a state not the BlockEndState
# generated during LR removal
#
# Do we need to evaluate predicates ever in closure for this case?
#
# No. Predicates, including precedence predicates, are only
# evaluated when computing a DFA start state. I.e., only before
# the lookahead (but not parser) consumes a token.
#
# There are no epsilon edges allowed in LR rule alt blocks or in
# the "primary" part (ID here). If closure is in
# StarLoopEntryState any lookahead operation will have consumed a
# token as there are no epsilon-paths that lead to
# StarLoopEntryState. We do not have to evaluate predicates
# therefore if we are in the generated StarLoopEntryState of a LR
# rule. Note that when making a prediction starting at that
# decision point, decision d=2, compute-start-state performs
# closure starting at edges[0], edges[1] emanating from
# StarLoopEntryState. That means it is not performing closure on
# StarLoopEntryState during compute-start-state.
#
# How do we know this always gives same prediction answer?
#
# Without predicates, loop entry and exit paths are ambiguous
# upon remaining input +b (in, say, a+b). Either path leads to a
# valid parse. Closure can lead to consuming + immediately or by
# falling out of this call to expr back into expr and loop back
# again to StarLoopEntryState to match +b. In this special case,
# we choose the more efficient path, which is to take the bypass
# path.
#
# The lookahead language has not changed because closure chooses
# one path over the other. Both paths lead to consuming the same
# remaining input during a lookahead operation. If the next token
# is an operator, lookahead will enter the choice block with
# operators. If it is not, lookahead will exit expr. Same as if
# closure had chosen to enter the choice block immediately.
#
# Closure is examining one config (some loop entry state, some alt,
# context) which means it is considering exactly one alt. Closure
# always copies the same alt to any derived configs.
#
# How do we know this optimization doesn't mess up precedence in
# our parse trees?
#
# Looking through expr from left edge of stat only has to confirm
# that an input, say, a+b+c; begins with any valid interpretation
# of an expression. The precedence actually doesn't matter when
# making a decision in stat seeing through expr. It is only when
# parsing rule expr that we must use the precedence to get the
# right interpretation and, hence, parse tree.
#
# @since 4.6
#
def canDropLoopEntryEdgeInLeftRecursiveRule(self, config):
# return False
p = config.state
# First check to see if we are in StarLoopEntryState generated during
# left-recursion elimination. For efficiency, also check if
# the context has an empty stack case. If so, it would mean
# global FOLLOW so we can't perform optimization
# Are we the special loop entry/exit state? or SLL wildcard
if p.stateType != ATNState.STAR_LOOP_ENTRY \
or not p.isPrecedenceDecision \
or config.context.isEmpty() \
or config.context.hasEmptyPath():
return False
# Require all return states to return back to the same rule
# that p is in.
numCtxs = len(config.context)
for i in range(0, numCtxs): # for each stack context
returnState = self.atn.states[config.context.getReturnState(i)]
if returnState.ruleIndex != p.ruleIndex:
return False
decisionStartState = p.transitions[0].target
blockEndStateNum = decisionStartState.endState.stateNumber
blockEndState = self.atn.states[blockEndStateNum]
# Verify that the top of each stack context leads to loop entry/exit
# state through epsilon edges and w/o leaving rule.
for i in range(0, numCtxs): # for each stack context
returnStateNumber = config.context.getReturnState(i)
returnState = self.atn.states[returnStateNumber]
# all states must have single outgoing epsilon edge
if len(returnState.transitions) != 1 or not returnState.transitions[0].isEpsilon:
return False
# Look for prefix op case like 'not expr', '(' type ')' expr
returnStateTarget = returnState.transitions[0].target
if returnState.stateType == ATNState.BLOCK_END and returnStateTarget is p:
continue
# Look for 'expr op expr' or case where expr's return state is block end
# of (...)* internal block; the block end points to loop back
# which points to p but we don't need to check that
if returnState is blockEndState:
continue
# Look for ternary expr ? expr : expr. The return state points at block end,
# which points at loop entry state
if returnStateTarget is blockEndState:
continue
# Look for complex prefix 'between expr and expr' case where 2nd expr's
# return state points at block end state of (...)* internal block
if returnStateTarget.stateType == ATNState.BLOCK_END \
and len(returnStateTarget.transitions) == 1 \
and returnStateTarget.transitions[0].isEpsilon \
and returnStateTarget.transitions[0].target is p:
continue
# anything else ain't conforming
return False
return True
def getRuleName(self, index:int):
if self.parser is not None and index>=0:
return self.parser.ruleNames[index]
else:
return "<rule " + str(index) + ">"
epsilonTargetMethods = dict()
epsilonTargetMethods[Transition.RULE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
sim.ruleTransition(config, t)
epsilonTargetMethods[Transition.PRECEDENCE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
sim.precedenceTransition(config, t, collectPredicates, inContext, fullCtx)
epsilonTargetMethods[Transition.PREDICATE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
sim.predTransition(config, t, collectPredicates, inContext, fullCtx)
epsilonTargetMethods[Transition.ACTION] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
sim.actionTransition(config, t)
epsilonTargetMethods[Transition.EPSILON] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
ATNConfig(state=t.target, config=config)
epsilonTargetMethods[Transition.ATOM] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None
epsilonTargetMethods[Transition.RANGE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None
epsilonTargetMethods[Transition.SET] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None
def getEpsilonTarget(self, config:ATNConfig, t:Transition, collectPredicates:bool, inContext:bool, fullCtx:bool, treatEofAsEpsilon:bool):
m = self.epsilonTargetMethods.get(t.serializationType, None)
if m is None:
return None
else:
return m(self, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon)
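# Action transitions are epsilon for prediction purposes; just move to
# the target state without executing the action.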
def actionTransition(self, config:ATNConfig, t:ActionTransition):
if ParserATNSimulator.debug:
print("ACTION edge " + str(t.ruleIndex) + ":" + str(t.actionIndex))
return ATNConfig(state=t.target, config=config)
def precedenceTransition(self, config:ATNConfig, pt:PrecedencePredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool):
if ParserATNSimulator.debug:
print("PRED (collectPredicates=" + str(collectPredicates) + ") " +
str(pt.precedence) + ">=_p, ctx dependent=true")
if self.parser is not None:
print("context surrounding pred is " + str(self.parser.getRuleInvocationStack()))
c = None
if collectPredicates and inContext:
if fullCtx:
# In full context mode, we can evaluate predicates on-the-fly
# during closure, which dramatically reduces the size of
# the config sets. It also obviates the need to test predicates
# later during conflict resolution.
currentPosition = self._input.index
self._input.seek(self._startIndex)
predSucceeds = pt.getPredicate().eval(self.parser, self._outerContext)
self._input.seek(currentPosition)
if predSucceeds:
c = ATNConfig(state=pt.target, config=config) # no pred context
else:
newSemCtx = andContext(config.semanticContext, pt.getPredicate())
c = ATNConfig(state=pt.target, semantic=newSemCtx, config=config)
else:
c = ATNConfig(state=pt.target, config=config)
if ParserATNSimulator.debug:
print("config from pred transition=" + str(c))
return c
def predTransition(self, config:ATNConfig, pt:PredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool):
if ParserATNSimulator.debug:
print("PRED (collectPredicates=" + str(collectPredicates) + ") " + str(pt.ruleIndex) +
":" + str(pt.predIndex) + ", ctx dependent=" + str(pt.isCtxDependent))
if self.parser is not None:
print("context surrounding pred is " + str(self.parser.getRuleInvocationStack()))
c = None
if collectPredicates and (not pt.isCtxDependent or (pt.isCtxDependent and inContext)):
if fullCtx:
# In full context mode, we can evaluate predicates on-the-fly
# during closure, which dramatically reduces the size of
# the config sets. It also obviates the need to test predicates
# later during conflict resolution.
currentPosition = self._input.index
self._input.seek(self._startIndex)
predSucceeds = pt.getPredicate().eval(self.parser, self._outerContext)
self._input.seek(currentPosition)
if predSucceeds:
c = ATNConfig(state=pt.target, config=config) # no pred context
else:
newSemCtx = andContext(config.semanticContext, pt.getPredicate())
c = ATNConfig(state=pt.target, semantic=newSemCtx, config=config)
else:
c = ATNConfig(state=pt.target, config=config)
if ParserATNSimulator.debug:
print("config from pred transition=" + str(c))
return c
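# Follow a rule transition: push the follow state onto the prediction
# context so closure knows where to return once the called rule finishes.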
def ruleTransition(self, config:ATNConfig, t:RuleTransition):
if ParserATNSimulator.debug:
print("CALL rule " + self.getRuleName(t.target.ruleIndex) + ", ctx=" + str(config.context))
returnState = t.followState
newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber)
return ATNConfig(state=t.target, context=newContext, config=config )
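# Union the per-(state, context) alternative subsets of the configuration
# set; used when deciding/reporting which alternatives are in conflict.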
def getConflictingAlts(self, configs:ATNConfigSet):
altsets = PredictionMode.getConflictingAltSubsets(configs)
return PredictionMode.getAlts(altsets)
# Sam pointed out a problem with the previous definition, v3, of
# ambiguous states. If we have another state associated with conflicting
# alternatives, we should keep going. For example, the following grammar
#
# s : (ID | ID ID?) ';' ;
#
# When the ATN simulation reaches the state before ';', it has a DFA
# state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
# 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
# because alternative two has another way to continue, via [6|2|[]].
# The key is that we have a single state that has config's only associated
# with a single alternative, 2, and crucially the state transitions
# among the configurations are all non-epsilon transitions. That means
# we don't consider any conflicts that include alternative 2. So, we
# ignore the conflict between alts 1 and 2. We ignore a set of
# conflicting alts when there is an intersection with an alternative
# associated with a single alt state in the state→config-list map.
#
# It's also the case that we might have two conflicting configurations but
# also a 3rd nonconflicting configuration for a different alternative:
# [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
#
# a : A | A | A B ;
#
# After matching input A, we reach the stop state for rule A, state 1.
# State 8 is the state right before B. Clearly alternatives 1 and 2
# conflict and no amount of further lookahead will separate the two.
# However, alternative 3 will be able to continue and so we do not
# stop working on this state. In the previous example, we're concerned
# with states associated with the conflicting alternatives. Here alt
# 3 is not associated with the conflicting configs, but since we can continue
# looking for input reasonably, I don't declare the state done. We
# ignore a set of conflicting alts when we have an alternative
# that we still need to pursue.
#
def getConflictingAltsOrUniqueAlt(self, configs:ATNConfigSet):
conflictingAlts = None
if configs.uniqueAlt!= ATN.INVALID_ALT_NUMBER:
conflictingAlts = set()
conflictingAlts.add(configs.uniqueAlt)
else:
conflictingAlts = configs.conflictingAlts
return conflictingAlts
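# Debugging helpers: render a token type (or the current lookahead token)
# using the parser's literal names when they are available.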
def getTokenName(self, t:int):
if t==Token.EOF:
return "EOF"
if self.parser is not None and \
self.parser.literalNames is not None and \
t < len(self.parser.literalNames):
return self.parser.literalNames[t] + "<" + str(t) + ">"
else:
return str(t)
def getLookaheadName(self, input:TokenStream):
return self.getTokenName(input.LA(1))
# Used for debugging in adaptivePredict around execATN but I cut
# it out for clarity now that alg. works well. We can leave this
# "dead" code for a bit.
#
def dumpDeadEndConfigs(self, nvae:NoViableAltException):
print("dead end configs: ")
for c in nvae.getDeadEndConfigs():
trans = "no edges"
if len(c.state.transitions)>0:
t = c.state.transitions[0]
if isinstance(t, AtomTransition):
trans = "Atom "+ self.getTokenName(t.label)
elif isinstance(t, SetTransition):
neg = isinstance(t, NotSetTransition)
trans = ("~" if neg else "")+"Set "+ str(t.set)
print(c.toString(self.parser, True) + ":" + trans, file=sys.stderr)
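# Build (but do not raise) a NoViableAltException describing the failed
# decision; callers decide whether to raise it.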
def noViableAlt(self, input:TokenStream, outerContext:ParserRuleContext, configs:ATNConfigSet, startIndex:int):
return NoViableAltException(self.parser, input, input.get(startIndex), input.LT(1), configs, outerContext)
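# Return the single alternative predicted by every configuration, or
# ATN.INVALID_ALT_NUMBER if more than one alternative is predicted.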
def getUniqueAlt(self, configs:ATNConfigSet):
alt = ATN.INVALID_ALT_NUMBER
for c in configs:
if alt == ATN.INVALID_ALT_NUMBER:
alt = c.alt # found first alt
elif c.alt!=alt:
return ATN.INVALID_ALT_NUMBER
return alt
#
# Add an edge to the DFA, if possible. This method calls
# {@link #addDFAState} to ensure the {@code to} state is present in the
# DFA. If {@code from} is {@code null}, or if {@code t} is outside the
# range of edges that can be represented in the DFA tables, this method
# returns without adding the edge to the DFA.
#
# <p>If {@code to} is {@code null}, this method returns {@code null}.
# Otherwise, this method returns the {@link DFAState} returned by calling
# {@link #addDFAState} for the {@code to} state.</p>
#
# @param dfa The DFA
# @param from The source state for the edge
# @param t The input symbol
# @param to The target state for the edge
#
# @return If {@code to} is {@code null}, this method returns {@code null};
# otherwise this method returns the result of calling {@link #addDFAState}
# on {@code to}
#
def addDFAEdge(self, dfa:DFA, from_:DFAState, t:int, to:DFAState):
if ParserATNSimulator.debug:
print("EDGE " + str(from_) + " -> " + str(to) + " upon " + self.getTokenName(t))
if to is None:
return None
to = self.addDFAState(dfa, to) # used existing if possible not incoming
if from_ is None or t < -1 or t > self.atn.maxTokenType:
return to
if from_.edges is None:
from_.edges = [None] * (self.atn.maxTokenType + 2)
from_.edges[t+1] = to # connect
if ParserATNSimulator.debug:
names = None if self.parser is None else self.parser.literalNames
print("DFA=\n" + dfa.toString(names))
return to
#
# Add state {@code D} to the DFA if it is not already present, and return
# the actual instance stored in the DFA. If a state equivalent to {@code D}
# is already in the DFA, the existing state is returned. Otherwise this
# method returns {@code D} after adding it to the DFA.
#
# <p>If {@code D} is {@link #ERROR}, this method returns {@link #ERROR} and
# does not change the DFA.</p>
#
# @param dfa The dfa
# @param D The DFA state to add
# @return The state stored in the DFA. This will be either the existing
# state if {@code D} is already in the DFA, or {@code D} itself if the
# state was not already present.
#
def addDFAState(self, dfa:DFA, D:DFAState):
if D is self.ERROR:
return D
existing = dfa.states.get(D, None)
if existing is not None:
return existing
D.stateNumber = len(dfa.states)
if not D.configs.readonly:
D.configs.optimizeConfigs(self)
D.configs.setReadonly(True)
dfa.states[D] = D
if ParserATNSimulator.debug:
print("adding new DFA state: " + str(D))
return D
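# The report* methods below print diagnostics in debug/retry_debug mode
# and forward to the parser's error listeners when a parser is attached.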
def reportAttemptingFullContext(self, dfa:DFA, conflictingAlts:set, configs:ATNConfigSet, startIndex:int, stopIndex:int):
if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
interval = range(startIndex, stopIndex + 1)
print("reportAttemptingFullContext decision=" + str(dfa.decision) + ":" + str(configs) +
", input=" + self.parser.getTokenStream().getText(interval))
if self.parser is not None:
self.parser.getErrorListenerDispatch().reportAttemptingFullContext(self.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
def reportContextSensitivity(self, dfa:DFA, prediction:int, configs:ATNConfigSet, startIndex:int, stopIndex:int):
if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
interval = range(startIndex, stopIndex + 1)
print("reportContextSensitivity decision=" + str(dfa.decision) + ":" + str(configs) +
", input=" + self.parser.getTokenStream().getText(interval))
if self.parser is not None:
self.parser.getErrorListenerDispatch().reportContextSensitivity(self.parser, dfa, startIndex, stopIndex, prediction, configs)
# If context-sensitive parsing, we know it's an ambiguity, not a conflict.
def reportAmbiguity(self, dfa:DFA, D:DFAState, startIndex:int, stopIndex:int,
exact:bool, ambigAlts:set, configs:ATNConfigSet ):
if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
# ParserATNPathFinder finder = new ParserATNPathFinder(parser, atn);
# int i = 1;
# for (Transition t : dfa.atnStartState.transitions) {
# print("ALT "+i+"=");
# print(startIndex+".."+stopIndex+", len(input)="+parser.getInputStream().size());
# TraceTree path = finder.trace(t.target, parser.getContext(), (TokenStream)parser.getInputStream(),
# startIndex, stopIndex);
# if ( path!=null ) {
# print("path = "+path.toStringTree());
# for (TraceTree leaf : path.leaves) {
# List<ATNState> states = path.getPathToNode(leaf);
# print("states="+states);
# }
# }
# i++;
# }
interval = range(startIndex, stopIndex + 1)
print("reportAmbiguity " + str(ambigAlts) + ":" + str(configs) +
", input=" + self.parser.getTokenStream().getText(interval))
if self.parser is not None:
self.parser.getErrorListenerDispatch().reportAmbiguity(self.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
|
Kazade/NeHe-Website
|
refs/heads/master
|
google_appengine/lib/django-1.5/django/contrib/gis/db/backends/spatialite/introspection.py
|
221
|
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import DatabaseIntrospection, FlexibleFieldLookupDict
from django.utils import six
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
"""
Subclass that updates the `base_data_types_reverse` dict
for geometry field types.
"""
base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
base_data_types_reverse.update(
{'point' : 'GeometryField',
'linestring' : 'GeometryField',
'polygon' : 'GeometryField',
'multipoint' : 'GeometryField',
'multilinestring' : 'GeometryField',
'multipolygon' : 'GeometryField',
'geometrycollection' : 'GeometryField',
})
class SpatiaLiteIntrospection(DatabaseIntrospection):
data_types_reverse = GeoFlexibleFieldLookupDict()
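# Query SpatiaLite's geometry_columns metadata to work out the Django
# field type and any non-default keyword arguments (srid, dim) for the
# given geometry column.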
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying the `geometry_columns` table to get additional metadata.
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if isinstance(dim, six.string_types) and 'Z' in dim:
field_params['dim'] = 3
finally:
cursor.close()
return field_type, field_params
|
amosonn/distributed
|
refs/heads/master
|
distributed/diagnostics/progress_stream.py
|
1
|
from __future__ import print_function, division, absolute_import
import logging
from toolz import valmap, merge
from tornado import gen
from .progress import AllProgress
from ..core import connect, write, coerce_to_address
from ..scheduler import Scheduler
from ..worker import dumps_function
logger = logging.getLogger(__name__)
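# Summarize an AllProgress plugin into a plain dict of per-key-group
# counts ('all', 'nbytes', 'memory', 'erred', 'released'); used as the
# periodic payload by progress_stream below.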
def counts(scheduler, allprogress):
return merge({'all': valmap(len, allprogress.all),
'nbytes': allprogress.nbytes},
{state: valmap(len, allprogress.state[state])
for state in ['memory', 'erred', 'released']})
@gen.coroutine
def progress_stream(address, interval):
""" Open a TCP connection to scheduler, receive progress messages
The messages coming back are dicts containing counts of key groups::
{'inc': {'all': 5, 'memory': 2, 'erred': 0, 'released': 1},
'dec': {'all': 1, 'memory': 0, 'erred': 0, 'released': 0}}
Parameters
----------
address: address of scheduler
interval: time between batches, in seconds
Examples
--------
>>> stream = yield progress_stream('127.0.0.1:8786', 0.100) # doctest: +SKIP
>>> print(yield read(stream)) # doctest: +SKIP
"""
ip, port = coerce_to_address(address, out=tuple)
stream = yield connect(ip, port)
yield write(stream, {'op': 'feed',
'setup': dumps_function(AllProgress),
'function': dumps_function(counts),
'interval': interval,
'teardown': dumps_function(Scheduler.remove_plugin)})
raise gen.Return(stream)
def nbytes_bar(nbytes):
""" Convert nbytes message into rectangle placements
>>> nbytes_bar({'inc': 1000, 'dec': 3000}) # doctest: +SKIP
{'name': ['dec', 'inc'],
'left': [0, 0.75],
'center': [0.375, 0.875],
'right': [0.75, 1.0]}
"""
total = sum(nbytes.values())
names = sorted(nbytes)
d = {'name': [],
'text': [],
'left': [],
'right': [],
'center': [],
'color': [],
'percent': [],
'MB': []}
if not total:
return d
right = 0
for name in names:
left = right
right = nbytes[name] / total + left
center = (right + left) / 2
d['MB'].append(nbytes[name] / 1000000)
d['percent'].append(round(nbytes[name] / total * 100, 2))
d['left'].append(left)
d['right'].append(right)
d['center'].append(center)
d['color'].append(task_stream_palette[incrementing_index(name)])
d['name'].append(name)
if right - left > 0.1:
d['text'].append(name)
else:
d['text'].append('')
return d
def progress_quads(msg, nrows=8, ncols=3):
"""
>>> msg = {'all': {'inc': 5, 'dec': 1, 'add': 4},
... 'memory': {'inc': 2, 'dec': 0, 'add': 1},
... 'erred': {'inc': 0, 'dec': 1, 'add': 0},
... 'released': {'inc': 1, 'dec': 0, 'add': 1}}
>>> progress_quads(msg, nrows=2) # doctest: +SKIP
{'name': ['inc', 'add', 'dec'],
'left': [0, 0, 1],
'right': [0.9, 0.9, 1.9],
'top': [0, -1, 0],
'bottom': [-.8, -1.8, -.8],
'released': [1, 1, 0],
'memory': [2, 1, 0],
'erred': [0, 0, 1],
'done': ['3 / 5', '2 / 4', '1 / 1'],
'released-loc': [.2/.9, .25 / 0.9, 1],
'memory-loc': [3 / 5 / .9, .5 / 0.9, 1],
'erred-loc': [3 / 5 / .9, .5 / 0.9, 1.9]}
"""
width = 0.9
names = sorted(msg['all'], key=msg['all'].get, reverse=True)
names = names[:nrows * ncols]
n = len(names)
d = {k: [v.get(name, 0) for name in names] for k, v in msg.items()}
d['name'] = names
d['show-name'] = [name if len(name) <= 15 else name[:12] + '...'
for name in names]
d['left'] = [i // nrows for i in range(n)]
d['right'] = [i // nrows + width for i in range(n)]
d['top'] = [-(i % nrows) for i in range(n)]
d['bottom'] = [-(i % nrows) - 0.8 for i in range(n)]
d['color'] = [task_stream_palette[incrementing_index(name)]
for name in names]
d['released-loc'] = []
d['memory-loc'] = []
d['erred-loc'] = []
d['done'] = []
for r, m, e, a, l in zip(d['released'], d['memory'],
d['erred'], d['all'], d['left']):
rl = width * r / a + l
ml = width * (r + m) / a + l
el = width * (r + m + e) / a + l
done = '%d / %d' % (r + m + e, a)
d['released-loc'].append(rl)
d['memory-loc'].append(ml)
d['erred-loc'].append(el)
d['done'].append(done)
return d
from toolz import memoize
from bokeh.palettes import Spectral11, Spectral9, viridis
import random
task_stream_palette = list(viridis(25))
random.shuffle(task_stream_palette)
import itertools
counter = itertools.count()
@memoize
def incrementing_index(o):
return next(counter)
|
deepsrijit1105/edx-platform
|
refs/heads/master
|
lms/djangoapps/branding/tests/test_page.py
|
7
|
"""
Tests for branding page
"""
import datetime
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponseRedirect
from django.test.utils import override_settings
from django.test.client import RequestFactory
from pytz import UTC
from mock import patch, Mock
from nose.plugins.attrib import attr
from edxmako.shortcuts import render_to_response
from branding.views import index
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from django.core.urlresolvers import reverse
from courseware.tests.helpers import LoginEnrollmentTestCase
from util.milestones_helpers import set_prerequisite_courses
from milestones.tests.utils import MilestonesTestCaseMixin
FEATURES_WITH_STARTDATE = settings.FEATURES.copy()
FEATURES_WITH_STARTDATE['DISABLE_START_DATES'] = False
FEATURES_WO_STARTDATE = settings.FEATURES.copy()
FEATURES_WO_STARTDATE['DISABLE_START_DATES'] = True
def mock_render_to_response(*args, **kwargs):
"""
Mock the render_to_response function
"""
return render_to_response(*args, **kwargs)
RENDER_MOCK = Mock(side_effect=mock_render_to_response)
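# Hedged sketch (illustrative, not part of the original tests): because
# RENDER_MOCK wraps the real render_to_response, a test can issue a request
# and then unpack the template name and context that were rendered, as the
# sorting tests below do.
def _example_inspect_render_mock():
    ((template, context), _) = RENDER_MOCK.call_args  # pylint: disable=unpacking-non-sequence
    return template, sorted(context.keys())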
@attr(shard=1)
class AnonymousIndexPageTest(ModuleStoreTestCase):
"""
Tests that anonymous users can access the '/' page. Needs courses with a start date.
"""
def setUp(self):
super(AnonymousIndexPageTest, self).setUp()
self.factory = RequestFactory()
self.course = CourseFactory.create(
days_early_for_beta=5,
enrollment_start=datetime.datetime.now(UTC) + datetime.timedelta(days=3),
user_id=self.user.id,
)
@override_settings(FEATURES=FEATURES_WITH_STARTDATE)
def test_none_user_index_access_with_startdate_fails(self):
"""
This is a regression test for a bug where the incoming user is
anonymous and start dates are being checked. It replaces a previous
test as it solves the issue in a different way
"""
self.client.logout()
response = self.client.get(reverse('root'))
self.assertEqual(response.status_code, 200)
@override_settings(FEATURES=FEATURES_WITH_STARTDATE)
def test_anon_user_with_startdate_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
@override_settings(FEATURES=FEATURES_WO_STARTDATE)
def test_anon_user_no_startdate_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_allow_x_frame_options(self):
"""
Check the x-frame-option response header
"""
# check to see that the default setting is to ALLOW iframing
resp = self.client.get('/')
self.assertEquals(resp['X-Frame-Options'], 'ALLOW')
@override_settings(X_FRAME_OPTIONS='DENY')
def test_deny_x_frame_options(self):
"""
Check the x-frame-option response header
"""
# check to see that the override value is honored
resp = self.client.get('/')
self.assertEquals(resp['X-Frame-Options'], 'DENY')
def test_edge_redirect_to_login(self):
"""
Test edge homepage redirect to lms login.
"""
request = self.factory.get('/')
request.user = AnonymousUser()
# HTTP Host changed to edge.
request.META["HTTP_HOST"] = "edge.edx.org"
response = index(request)
# Response should be instance of HttpResponseRedirect.
self.assertIsInstance(response, HttpResponseRedirect)
# Location should be "/login".
self.assertEqual(response._headers.get("location")[1], "/login") # pylint: disable=protected-access
@attr(shard=1)
class PreRequisiteCourseCatalog(ModuleStoreTestCase, LoginEnrollmentTestCase, MilestonesTestCaseMixin):
"""
Test to simulate and verify fix for disappearing courses in
course catalog when using pre-requisite courses
"""
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_course_with_prereq(self):
"""
Simulate having a course which has closed enrollments that has
a pre-req course
"""
pre_requisite_course = CourseFactory.create(
org='edX',
course='900',
display_name='pre requisite course',
emit_signals=True,
)
pre_requisite_courses = [unicode(pre_requisite_course.id)]
# for this failure to occur, the enrollment window needs to be in the past
course = CourseFactory.create(
org='edX',
course='1000',
display_name='course that has pre requisite',
# closed enrollment
enrollment_start=datetime.datetime(2013, 1, 1),
enrollment_end=datetime.datetime(2014, 1, 1),
start=datetime.datetime(2013, 1, 1),
end=datetime.datetime(2030, 1, 1),
pre_requisite_courses=pre_requisite_courses,
emit_signals=True,
)
set_prerequisite_courses(course.id, pre_requisite_courses)
resp = self.client.get('/')
self.assertEqual(resp.status_code, 200)
# make sure both courses are visible in the catalog
self.assertIn('pre requisite course', resp.content)
self.assertIn('course that has pre requisite', resp.content)
@attr(shard=1)
class IndexPageCourseCardsSortingTests(ModuleStoreTestCase):
"""
Test for Index page course cards sorting
"""
def setUp(self):
super(IndexPageCourseCardsSortingTests, self).setUp()
self.starting_later = CourseFactory.create(
org='MITx',
number='1000',
display_name='Starting later, Announced later',
metadata={
'start': datetime.datetime.now(UTC) + datetime.timedelta(days=4),
'announcement': datetime.datetime.now(UTC) + datetime.timedelta(days=3),
},
emit_signals=True,
)
self.starting_earlier = CourseFactory.create(
org='MITx',
number='1001',
display_name='Starting earlier, Announced earlier',
metadata={
'start': datetime.datetime.now(UTC) + datetime.timedelta(days=2),
'announcement': datetime.datetime.now(UTC) + datetime.timedelta(days=1),
},
emit_signals=True,
)
self.course_with_default_start_date = CourseFactory.create(
org='MITx',
number='1002',
display_name='Tech Beta Course',
emit_signals=True,
)
self.factory = RequestFactory()
@patch('student.views.render_to_response', RENDER_MOCK)
@patch('courseware.views.views.render_to_response', RENDER_MOCK)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_COURSE_DISCOVERY': False})
def test_course_discovery_off(self):
"""
Asserts that the Course Discovery UI elements follow the
feature flag settings
"""
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
# assert that the course discovery UI is not present
self.assertNotIn('Search for a course', response.content)
# check the /courses view
response = self.client.get(reverse('branding.views.courses'))
self.assertEqual(response.status_code, 200)
# assert that the course discovery UI is not present
self.assertNotIn('Search for a course', response.content)
self.assertNotIn('<aside aria-label="Refine Your Search" class="search-facets phone-menu">', response.content)
# make sure we have the special css class on the section
self.assertIn('<div class="courses no-course-discovery"', response.content)
@patch('student.views.render_to_response', RENDER_MOCK)
@patch('courseware.views.views.render_to_response', RENDER_MOCK)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_COURSE_DISCOVERY': True})
def test_course_discovery_on(self):
"""
Asserts that the Course Discovery UI elements follow the
feature flag settings
"""
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
# assert that the course discovery UI is not present
self.assertIn('Search for a course', response.content)
# check the /courses view
response = self.client.get(reverse('branding.views.courses'))
self.assertEqual(response.status_code, 200)
# assert that the course discovery UI is present
self.assertIn('Search for a course', response.content)
self.assertIn('<aside aria-label="Refine Your Search" class="search-facets phone-menu">', response.content)
self.assertIn('<div class="courses"', response.content)
@patch('student.views.render_to_response', RENDER_MOCK)
@patch('courseware.views.views.render_to_response', RENDER_MOCK)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_COURSE_DISCOVERY': False})
def test_course_cards_sorted_by_default_sorting(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
((template, context), _) = RENDER_MOCK.call_args # pylint: disable=unpacking-non-sequence
self.assertEqual(template, 'index.html')
# by default the courses will be sorted by their creation dates, earliest first.
self.assertEqual(context['courses'][0].id, self.starting_earlier.id)
self.assertEqual(context['courses'][1].id, self.starting_later.id)
self.assertEqual(context['courses'][2].id, self.course_with_default_start_date.id)
# check the /courses view
response = self.client.get(reverse('branding.views.courses'))
self.assertEqual(response.status_code, 200)
((template, context), _) = RENDER_MOCK.call_args # pylint: disable=unpacking-non-sequence
self.assertEqual(template, 'courseware/courses.html')
# by default the courses will be sorted by their creation dates, earliest first.
self.assertEqual(context['courses'][0].id, self.starting_earlier.id)
self.assertEqual(context['courses'][1].id, self.starting_later.id)
self.assertEqual(context['courses'][2].id, self.course_with_default_start_date.id)
@patch('student.views.render_to_response', RENDER_MOCK)
@patch('courseware.views.views.render_to_response', RENDER_MOCK)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_COURSE_SORTING_BY_START_DATE': False})
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_COURSE_DISCOVERY': False})
def test_course_cards_sorted_by_start_date_disabled(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
((template, context), _) = RENDER_MOCK.call_args # pylint: disable=unpacking-non-sequence
self.assertEqual(template, 'index.html')
# now the courses will be sorted by their announcement dates.
self.assertEqual(context['courses'][0].id, self.starting_later.id)
self.assertEqual(context['courses'][1].id, self.starting_earlier.id)
self.assertEqual(context['courses'][2].id, self.course_with_default_start_date.id)
# check the /courses view as well
response = self.client.get(reverse('branding.views.courses'))
self.assertEqual(response.status_code, 200)
((template, context), _) = RENDER_MOCK.call_args # pylint: disable=unpacking-non-sequence
self.assertEqual(template, 'courseware/courses.html')
# now the courses will be sorted by their announcement dates.
self.assertEqual(context['courses'][0].id, self.starting_later.id)
self.assertEqual(context['courses'][1].id, self.starting_earlier.id)
self.assertEqual(context['courses'][2].id, self.course_with_default_start_date.id)
|
Dhivyap/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_alertsyslogconfig.py
|
28
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_alertsyslogconfig
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of AlertSyslogConfig Avi RESTful Object
description:
- This module is used to configure the AlertSyslogConfig object.
- More examples at U(https://github.com/avinetworks/devops).
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
description:
description:
- User defined description for alert syslog config.
name:
description:
- A user-friendly name of the syslog notification.
required: true
syslog_servers:
description:
- The list of syslog servers.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create Alert Syslog object to forward all events to external syslog server
avi_alertsyslogconfig:
controller: '{{ controller }}'
name: Roberts-syslog
password: '{{ password }}'
syslog_servers:
- syslog_server: 10.10.0.100
syslog_server_port: 514
udp: true
tenant_ref: admin
username: '{{ username }}'
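# Hedged, illustrative variation (not from the original module docs): the same
# object updated via HTTP PATCH, using only options documented above.
- name: Append another syslog server to the existing Alert Syslog object
  avi_alertsyslogconfig:
    controller: '{{ controller }}'
    username: '{{ username }}'
    password: '{{ password }}'
    avi_api_update_method: patch
    avi_api_patch_op: add
    name: Roberts-syslog
    syslog_servers:
      - syslog_server: 10.10.0.101
        syslog_server_port: 514
        udp: true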
"""
RETURN = '''
obj:
description: AlertSyslogConfig (api/alertsyslogconfig) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
description=dict(type='str',),
name=dict(type='str', required=True),
syslog_servers=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'alertsyslogconfig',
set([]))
if __name__ == '__main__':
main()
|
40223145c2g18/40223145
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py
|
733
|
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
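# Hedged sketch (illustrative, not part of the original module): how the level
# helpers above behave; the custom level number 25 is purely illustrative.
def _example_level_names():
    assert getLevelName(INFO) == 'INFO'
    assert getLevelName(15) == 'Level 15'   # unknown levels fall through
    addLevelName(25, 'NOTICE')
    assert _checkLevel('NOTICE') == 25
    return getLevelName(25)                 # -> 'NOTICE'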
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
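# Hedged sketch (illustrative, not part of the original module): rebuilding a
# record from a plain dict, e.g. one received over a socket as described above.
def _example_makeLogRecord():
    rv = makeLogRecord({'name': 'demo', 'levelno': INFO, 'levelname': 'INFO',
                        'msg': 'hello %s', 'args': ('world',)})
    return rv.getMessage()   # -> 'hello world'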
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
_STYLES = {
'%': PercentStyle,
'{': StrFormatStyle,
'$': StringTemplateStyle
}
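# Hedged sketch (illustrative, not part of the original module): the three
# style classes above render the same record identically; only the format
# syntax differs.
def _example_styles():
    rec = makeLogRecord({'levelname': 'WARNING', 'msg': 'disk %s is full',
                         'args': ('sda1',)})
    rec.message = rec.getMessage()
    fmts = {'%': '%(levelname)s:%(message)s',
            '{': '{levelname}:{message}',
            '$': '${levelname}:${message}'}
    return [_STYLES[style](fmt).format(rec) for style, fmt in fmts.items()]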
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
.. versionchanged: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime()), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
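# Hedged sketch (illustrative, not part of the original module): formatting a
# hand-built record with an explicit Formatter; message and line number are
# made up.
def _example_formatter():
    fmt = Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')
    rec = LogRecord('demo', INFO, __file__, 42, 'started in %.1fs', (0.5,), None)
    return fmt.format(rec)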
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
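# Hedged sketch (illustrative, not part of the original module): the hierarchy
# rule from the Filter docstring above, exercised on two made-up logger names.
def _example_filter():
    f = Filter('A.B')
    assert f.filter(makeLogRecord({'name': 'A.B.C'}))
    assert not f.filter(makeLogRecord({'name': 'A.BB'}))
    return f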
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
.. versionchanged: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError: #pragma: no cover
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
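# Hedged sketch (illustrative, not part of the original module): emitting one
# made-up record through a StreamHandler attached to an in-memory stream.
def _example_stream_handler():
    buf = io.StringIO()
    handler = StreamHandler(buf)
    handler.setFormatter(Formatter('%(levelname)s %(message)s'))
    handler.emit(LogRecord('demo', WARNING, __file__, 1, 'low disk space',
                           None, None))
    return buf.getvalue()   # 'WARNING low disk space\n'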
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
#The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
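# Hedged sketch (illustrative, not part of the original module): getChild() as
# described in its docstring; assumes the module has finished importing so the
# root logger and manager set up near the end of this file already exist.
def _example_getChild():
    parent = Logger.manager.getLogger('input')
    child = parent.getChild('csv')
    return child.name, child.parent is parent   # ('input.csv', True)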
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
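# A minimal sketch of the extension point described in process(): subclass
# LoggerAdapter and rewrite the message using the adapter's contextual dict.
# The "connid" key and the class name are illustrative, not an established API.
class _ConnectionAdapterSketch(LoggerAdapter):
    """Prefix every message with a connection id taken from self.extra."""
    def process(self, msg, kwargs):
        kwargs["extra"] = self.extra
        return "[conn %s] %s" % (self.extra.get("connid", "?"), msg), kwargs
# Illustrative use: _ConnectionAdapterSketch(getLogger("net"), {"connid": 42}).info("opened")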
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
              handlers, which will be added to the root logger. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
       Added the ``handlers`` parameter. A ``ValueError`` is now raised for
       incompatible arguments (e.g. ``handlers`` specified together with
       ``filename``/``filemode``, or ``filename``/``filemode`` specified
       together with ``stream``, or ``handlers`` specified together with
       ``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
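# A minimal sketch of the one-shot configuration described above, wrapped in a
# helper so nothing runs at import time; the "app.log" file name, the format
# string and the "myapp" logger name are illustrative choices, not defaults.
def _basic_config_sketch():
    basicConfig(filename="app.log", filemode="a",
                format="%(asctime)s %(levelname)s %(name)s: %(message)s",
                level=DEBUG)
    getLogger("myapp").info("file logging configured")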
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
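# A minimal sketch of the library-side pattern described in the docstring above:
# attach a NullHandler to the library's top-level logger so the one-off
# "no handlers" message is never emitted. The "mylib" name is illustrative.
def _install_null_handler_sketch():
    getLogger("mylib").addHandler(NullHandler())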
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
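# A minimal sketch of the warnings integration above: after captureWarnings(True),
# warnings.warn() output is routed to the "py.warnings" logger at WARNING level.
# The StreamHandler choice here is illustrative; any configured handler would do.
def _capture_warnings_sketch():
    captureWarnings(True)
    getLogger("py.warnings").addHandler(StreamHandler())
    warnings.warn("this message is routed through the 'py.warnings' logger")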
|
sayan801/indivo_server
|
refs/heads/master
|
indivo/lib/query.py
|
3
|
"""
Common Functionality for support of the Query API
"""
from indivo.lib.sharing_utils import carenet_facts_filter
from indivo.lib.utils import render_template
from indivo.lib.iso8601 import parse_utc_date
from django.db.models import Avg, Count, Max, Min, Sum
from django.db import connection
from django.db.backends import postgresql_psycopg2, mysql, oracle
db_string = connection.settings_dict['ENGINE']
if '.' in db_string:
db_module, db_name = db_string.rsplit('.', 1)
DB_ENGINE = getattr(__import__(db_module, fromlist=[db_name]), db_name)
else:
DB_ENGINE = __import__(db_string)
DATE = 'date'
STRING = 'string'
NUMBER = 'number'
EXPOSED_TYPES = {
STRING: str,
DATE: parse_utc_date,
NUMBER: float
}
AGG_OPS = {
'sum': (Sum, [NUMBER]),
'avg': (Avg, [NUMBER]),
'max': (Max, [NUMBER, DATE]),
'min': (Min, [NUMBER, DATE]),
'count': (Count, [NUMBER, DATE, STRING])
}
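# A small sketch of how an AGG_OPS entry is consumed elsewhere in this module:
# the first element is the Django aggregate class, the second lists the exposed
# field types it accepts. The 'value' field name below is illustrative.
def _example_aggregate_args(model_field='value'):
    agg_func, allowed_types = AGG_OPS['avg']
    assert NUMBER in allowed_types
    return {'aggregate_value': agg_func(model_field)}  # e.g. {'aggregate_value': Avg('value')}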
TIME_INCRS = {
'hour': {postgresql_psycopg2:'YYYY-MM-DD-HH24',
oracle:'YYYY-MM-DD-HH24',
mysql:'%%Y-%%m-%%d-%%H',},
'day': {postgresql_psycopg2:'YYYY-MM-DD',
oracle:'YYYY-MM-DD',
mysql:'%%Y-%%m-%%d',},
'week': {postgresql_psycopg2:'YYYY-WW',
oracle:'YYYY-WW',
mysql:'%%Y-%%U',},
'month': {postgresql_psycopg2:'YYYY-MM',
oracle:'YYYY-MM',
mysql:'%%Y-%%m',},
'year': {postgresql_psycopg2:'YYYY',
oracle:'YYYY',
mysql:'%%Y',},
'hourofday': {postgresql_psycopg2:'HH24',
oracle:'HH24',
mysql:'%%H',},
'dayofweek': {postgresql_psycopg2:'D',
oracle:'D',
mysql:'%%w',},
'weekofyear': {postgresql_psycopg2:'WW',
oracle:'WW',
mysql:'%%U',},
'monthofyear': {postgresql_psycopg2:'MM',
oracle:'MM',
mysql:'%%m',},
}
FORMAT_STRS = {
postgresql_psycopg2: "to_char(\"%(field)s\", '%(format)s')",
oracle: "to_char(%(field)s, '%(format)s')",
mysql: "date_format(%(field)s, '%(format)s')",
}
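# A small sketch of how FORMAT_STRS and TIME_INCRS combine into the SQL fragment
# used for date grouping: grouping by 'month' on PostgreSQL yields
# to_char("date_measured", 'YYYY-MM'). The field name is illustrative.
def _example_date_group_sql(engine=postgresql_psycopg2, time_incr='month', field='date_measured'):
    return FORMAT_STRS[engine] % {"field": field, "format": TIME_INCRS[time_incr][engine]}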
OUTPUT_TEMPLATE = 'reports/report'
AGGREGATE_TEMPLATE = 'reports/aggregate.xml'
RELATED_LIST = [
'document',
'document__creator',
'document__creator__account',
'document__creator__pha',
'document__suppressed_by',
'document__suppressed_by__account',
'document__status',
]
class FactQuery(object):
def __init__(self, model, model_filters,
query_options,
record=None, carenet=None):
self.model = model
self.valid_filters = model_filters
self.group_by = query_options.get('group_by')
self.date_group = query_options.get('date_group')
self.aggregate_by = query_options.get('aggregate_by')
self.limit = query_options.get('limit')
self.offset = query_options.get('offset')
self.order_by = query_options.get('order_by')
self.status = query_options.get('status')
self.date_range = query_options.get('date_range')
self.query_filters = query_options.get('filters')
self.results = None
self.trc = None
self.aggregate_p = None
self.grouping_p = None
self.flat_aggregation = None
self.carenet = carenet
self.record = carenet.record if carenet else record
def render(self, item_template, output_template=OUTPUT_TEMPLATE):
if self.results is None:
self.execute()
if self.aggregate_by:
item_template = AGGREGATE_TEMPLATE
# if we can, iterate efficiently over our results
if hasattr(self.results, 'iterator'):
results = self.results.iterator()
else:
results = self.results
template_args = {'fobjs': results,
'trc': self.trc,
'group_by': self.group_by,
'date_group': self.date_group,
'aggregate_by': self.aggregate_by,
'limit': self.limit,
'offset': self.offset,
'order_by': self.order_by,
'status': self.status,
'date_range': self.date_range,
'filters': self.query_filters,
'item_template': item_template
}
return render_template(output_template, template_args, type="xml")
def execute(self):
'''
New API Query Interface (to be released for Beta 3)
Query operators are evaluated as follows:
        1. Filter operators, including date_range but excluding limit and offset, are applied first.
        2. group_by and date_group, if supplied, are evaluated next.
        3. aggregate_by is evaluated.
        4. order_by is applied.
        5. The query is evaluated to produce an ordered list of results, then limit and offset are applied.
'''
# This is okay, Django evaluates lazily
results = self.model.objects.all()
# Apply select_related for performance here
results = results.select_related(*RELATED_LIST)
# 1. Apply filter operators (but not limit/offset).
results = self._apply_filters(results)
# 2. Evaluate group_by or date_group
results = self._apply_grouping(results)
# 3. Evaluate aggregate_by
self.grouping_p = self.group_by or self.date_group
self.flat_aggregation = self.aggregate_by and not self.grouping_p
results = self._apply_aggregation(results)
# 4. Order_by
# ignore order_by if we have a single aggregation
if not self.flat_aggregation:
results = self._apply_ordering(results)
# 5. limit and offset. Make sure to grab the total result count
# before paging is applied and we lose it.
# No need to get the count or worry about paging for a flat
# aggregation, which was already evaluated
if self.flat_aggregation:
self.trc = 1
results = [results] # [{'aggregation': 'value'}]
# Avoid evaluation for as long as possible: pass back a QuerySet object
else:
self.trc = results.count()
if self.limit:
results = results[self.offset:self.offset+self.limit]
# And we're done!
self.results = results
def _apply_filters(self, results):
# Carenet filters.
# DH 04-07-2011: Moved up front and changed to not evaluate the queryset
# Need to allow queries with no record or carenet, i.e., Audit, which isn't constrained to a single record
if self.record:
results = results.filter(record=self.record)
results = carenet_facts_filter(self.carenet, results)
filter_args = {}
for field, val in self.query_filters.iteritems():
if self.valid_filters.has_key(field):
field_type = self.valid_filters[field][1]
try:
val = val.split('|')
if len(val) == 1:
parsed_val = EXPOSED_TYPES[field_type](val[0])
filter_args[self.valid_filters[field][0]] = parsed_val
else:
parsed_values = [EXPOSED_TYPES[field_type](x) for x in val]
if len(parsed_values) > 0:
filter_args[self.valid_filters[field][0] + '__in'] = parsed_values
                except Exception:
raise ValueError('Invalid argument type for field %s: expected %s, got %s'%(field, field_type, val))
else:
raise ValueError('Invalid filter for fact type %s: %s'%(self.model.__name__, field))
if self.date_range:
if self.valid_filters.has_key(self.date_range['field']):
field_type = self.valid_filters[self.date_range['field']][1]
if field_type != DATE:
raise ValueError('Date Ranges may only be calculated over fields of type "date": got %s(%s)'%(self.date_range['field'], field_type))
if self.date_range['start_date']:
filter_args['%s__gte'%(self.valid_filters[self.date_range['field']][0])] = self.date_range['start_date']
if self.date_range['end_date']:
filter_args['%s__lte'%(self.valid_filters[self.date_range['field']][0])] = self.date_range['end_date']
else:
raise ValueError('Invalid date range filter for fact type %s: %s'%(self.model.__name__, self.date_range['field']))
if self.status:
filter_args['document__status'] = self.status
if filter_args:
results = results.filter(**filter_args)
# Results look like:
# [obj1, obj2, ...] For every Fact object we haven't filtered out
return results
def _apply_grouping(self, results):
group_field = 'all'
# Handle the ordinary group
if self.group_by:
if self.valid_filters.has_key(self.group_by):
group_field = self.valid_filters[self.group_by][0]
else:
raise ValueError('Invalid grouping field for fact type %s: %s'%(self.model.__name__, self.group_by))
# Handle the date group
elif self.date_group:
if self.valid_filters.has_key(self.date_group['field']):
field_type = self.valid_filters[self.date_group['field']][1]
if field_type != DATE:
                    raise ValueError('Date groups may only be calculated over fields of type "date": got %s(%s)'%(self.date_group['field'], field_type))
group_field = self.valid_filters[self.date_group['field']][0]
date_incr = self.date_group['time_incr']
if TIME_INCRS.has_key(date_incr):
time_format = TIME_INCRS[date_incr][DB_ENGINE]
format_str = FORMAT_STRS[DB_ENGINE]
results = results.extra(select={date_incr:format_str%{"field":group_field, "format":time_format}})
# From now on, we look at the date-formatted string only
group_field = date_incr
else:
raise ValueError('Invalid date_group Increment: %s'%(date_incr))
else:
raise ValueError('Invalid grouping field for fact type %s: %s'%(self.model.__name__, self.date_group['field']))
        if group_field != 'all':
results = results.values(group_field)
# Results look like:
# [{'group_field': 'value1'}, {'group_field': 'value2'}], 1 dict per Fact object if we grouped
# if there was no grouping, results look like: [obj1, obj2, ...]
return results
def _apply_aggregation(self, results):
if self.aggregate_by:
agg_field = self.aggregate_by['field']
if self.valid_filters.has_key(agg_field):
agg_field_type = self.valid_filters[agg_field][1]
# Look up the operator
if AGG_OPS.has_key(self.aggregate_by['operator']):
agg = AGG_OPS[self.aggregate_by['operator']]
agg_func_types = agg[1]
if agg_field_type not in agg_func_types:
raise ValueError('Cannot apply aggregate function %s (type %s) to field %s (type %s)'%(self.aggregate_by['operator'], agg_func_types, agg_field, agg_field_type))
agg_func = agg[0]
agg_args = { 'aggregate_value': agg_func(self.valid_filters[agg_field][0])}
else:
raise ValueError('Invalid aggregation operator: %s'%(self.aggregate_by['operator']))
# If we grouped, handle differently
if self.grouping_p:
results = results.annotate(**agg_args)
else:
results = results.aggregate(**agg_args)
else:
raise ValueError('Invalid aggregation field for fact type %s: %s'%(self.model.__name__, agg_field))
else:
if self.grouping_p:
raise ValueError('Cannot make grouped queries without specifying an Aggregation!')
# Results look like:
# [{'group_field' : value1, 'aggregation': agg_value} ...] 1 dict per each UNIQUE group_field value
# If there was no grouping, results look like: {'aggregation': value'}
# If there was no grouping OR aggregation, results look like: [obj1, obj2...]
return results
def _apply_ordering(self, results):
if self.order_by:
desc = self.order_by[0] == '-'
order_by_field_ext = self.order_by if not desc else self.order_by[1:]
# get the internal model field for order by
if self.valid_filters.has_key(order_by_field_ext):
order_by_field = self.valid_filters[order_by_field_ext][0]
else:
raise ValueError('Invalid order by field for fact type %s: %s'%(self.model.__name__, self.order_by))
# Handle special cases of aggregation and grouping
if self.aggregate_by and order_by_field_ext == self.aggregate_by['field']:
order_by_field = 'aggregate_value'
elif self.group_by and order_by_field_ext != self.group_by:
raise ValueError('OrderBy fields in aggregations may only refer to the grouping field or the aggregation field. Your field was: %s'%(self.order_by))
elif self.date_group and order_by_field_ext != self.date_group['field']:
raise ValueError('OrderBy fields in aggregations may only refer to the grouping field or the aggregation field. Your field was: %s'%(self.order_by))
elif self.date_group:
order_by_field = self.date_group['time_incr']
# Django seems to be nondeterministic in its ordering of ties, so let's add an implicit secondary ordering on primary key
secondary_order_by = 'id'
# Do the ordering
order_by_str = order_by_field if not desc else '-'+order_by_field
results = results.order_by(order_by_str, secondary_order_by)
else:
# Clear ordering if none was specified, to avoid bad interactions with grouping
results = results.order_by()
return results
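# A minimal usage sketch, assuming a Fact model called Measurement whose filter
# map exposes 'value' as NUMBER and 'date_measured' as DATE; the model, field
# names and option values here are illustrative, not part of this module's API.
def _fact_query_sketch(Measurement, measurement_filters, record):
    options = {
        'group_by': None,
        'date_group': {'field': 'date_measured', 'time_incr': 'month'},
        'aggregate_by': {'field': 'value', 'operator': 'avg'},
        'limit': 100, 'offset': 0,
        'order_by': 'date_measured',
        'status': None,
        'date_range': None,
        'filters': {},
    }
    query = FactQuery(Measurement, measurement_filters, options, record=record)
    query.execute()
    return query.results  # one dict per month: {'month': ..., 'aggregate_value': ...}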
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/cognitiveservices/azure-cognitiveservices-search-imagesearch/azure/cognitiveservices/search/imagesearch/version.py
|
15
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
VERSION = "2.0.0"
|
yrizk/django-blog
|
refs/heads/master
|
blogvenv/lib/python3.4/site-packages/django/contrib/gis/db/backends/util.py
|
81
|
import warnings
from django.utils.deprecation import RemovedInDjango19Warning
warnings.warn(
"The django.contrib.gis.db.backends.util module has been renamed. "
"Use django.contrib.gis.db.backends.utils instead.",
RemovedInDjango19Warning, stacklevel=2)
from django.contrib.gis.db.backends.utils import * # NOQA isort:skip
|
3dfxsoftware/cbss-addons
|
refs/heads/master
|
sale_stock/company.py
|
7
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class company(osv.osv):
_inherit = 'res.company'
_columns = {
'security_lead': fields.float(
'Security Days', required=True,
help="Margin of error for dates promised to customers. "\
"Products will be scheduled for procurement and delivery "\
"that many days earlier than the actual promised date, to "\
"cope with unexpected delays in the supply chain."),
}
_defaults = {
'security_lead': 0.0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
renanalencar/hermes
|
refs/heads/master
|
feedinfo/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
jpalladino84/Python-Roguelike-Framework
|
refs/heads/master
|
areas/tile.py
|
2
|
class Tile(object):
def __init__(self, x, y, is_blocked=False):
self.x = x
self.y = y
self.is_blocked = is_blocked
self.is_explored = False
self.is_ground = False
self.contains_object = None
self.block_sight = is_blocked
|