| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
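Each row below pairs repository metadata (repo_name, ref, path, copies) with the raw contents of one source file. As a minimal sketch, assuming the dump has been exported locally as a Parquet file (the name code_files.parquet is a hypothetical placeholder), the columns can be iterated like so:

import pandas as pd

# Hypothetical local export of this dump; adjust the path to your own copy.
df = pd.read_parquet("code_files.parquet")
for _, row in df.head(3).iterrows():
    print(row["repo_name"], row["ref"], row["path"], row["copies"])
    print(str(row["content"] or "")[:80])  # content may be null (⌀)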
| edmundgentle/schoolscript | refs/heads/master | SchoolScript/bin/Debug/pythonlib/Lib/test/test_imaplib.py | 3 |
from test import support
# If we end up with a significant number of tests that don't require
# threading, this test module should be split. Right now we skip
# them all if we don't have threading.
threading = support.import_module('threading')
from contextlib import contextmanager
import imaplib
import os.path
import socketserver
import time
import calendar
from test.support import reap_threads, verbose, transient_internet
import unittest
try:
import ssl
except ImportError:
ssl = None
CERTFILE = None
class TestImaplib(unittest.TestCase):
def test_Internaldate2tuple(self):
t0 = calendar.timegm((2000, 1, 1, 0, 0, 0, -1, -1, -1))
tt = imaplib.Internaldate2tuple(
b'25 (INTERNALDATE "01-Jan-2000 00:00:00 +0000")')
self.assertEqual(time.mktime(tt), t0)
tt = imaplib.Internaldate2tuple(
b'25 (INTERNALDATE "01-Jan-2000 11:30:00 +1130")')
self.assertEqual(time.mktime(tt), t0)
tt = imaplib.Internaldate2tuple(
b'25 (INTERNALDATE "31-Dec-1999 12:30:00 -1130")')
self.assertEqual(time.mktime(tt), t0)
def test_that_Time2Internaldate_returns_a_result(self):
# We can check only that it successfully produces a result,
# not the correctness of the result itself, since the result
# depends on the timezone the machine is in.
timevalues = [2000000000, 2000000000.0, time.localtime(2000000000),
'"18-May-2033 05:33:20 +0200"']
for t in timevalues:
imaplib.Time2Internaldate(t)
if ssl:
class SecureTCPServer(socketserver.TCPServer):
def get_request(self):
newsocket, fromaddr = self.socket.accept()
connstream = ssl.wrap_socket(newsocket,
server_side=True,
certfile=CERTFILE)
return connstream, fromaddr
IMAP4_SSL = imaplib.IMAP4_SSL
else:
class SecureTCPServer:
pass
IMAP4_SSL = None
class SimpleIMAPHandler(socketserver.StreamRequestHandler):
timeout = 1
def _send(self, message):
if verbose: print("SENT: %r" % message.strip())
self.wfile.write(message)
def handle(self):
# Send a welcome message.
self._send(b'* OK IMAP4rev1\r\n')
while 1:
# Gather up input until we receive a line terminator or we timeout.
# Accumulate read(1) because it's simpler to handle the differences
# between naked sockets and SSL sockets.
line = b''
while 1:
try:
part = self.rfile.read(1)
if part == b'':
# Naked sockets return empty strings..
return
line += part
except IOError:
# ..but SSLSockets throw exceptions.
return
if line.endswith(b'\r\n'):
break
if verbose: print('GOT: %r' % line.strip())
splitline = line.split()
tag = splitline[0].decode('ASCII')
cmd = splitline[1].decode('ASCII')
args = splitline[2:]
if hasattr(self, 'cmd_'+cmd):
getattr(self, 'cmd_'+cmd)(tag, args)
else:
self._send('{} BAD {} unknown\r\n'.format(tag, cmd).encode('ASCII'))
def cmd_CAPABILITY(self, tag, args):
self._send(b'* CAPABILITY IMAP4rev1\r\n')
self._send('{} OK CAPABILITY completed\r\n'.format(tag).encode('ASCII'))
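# A minimal sketch (not part of the original test module) of how the dispatch
# convention above can be extended: handle() looks up 'cmd_' + <COMMAND>, so a
# subclass only needs to define another cmd_* method to answer a new command.
class NoopIMAPHandler(SimpleIMAPHandler):
    def cmd_NOOP(self, tag, args):
        self._send('{} OK NOOP completed\r\n'.format(tag).encode('ASCII'))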
class BaseThreadedNetworkedTests(unittest.TestCase):
def make_server(self, addr, hdlr):
class MyServer(self.server_class):
def handle_error(self, request, client_address):
self.close_request(request)
self.server_close()
raise
if verbose: print("creating server")
server = MyServer(addr, hdlr)
self.assertEqual(server.server_address, server.socket.getsockname())
if verbose:
print("server created")
print("ADDR =", addr)
print("CLASS =", self.server_class)
print("HDLR =", server.RequestHandlerClass)
t = threading.Thread(
name='%s serving' % self.server_class,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print("server running")
return server, t
def reap_server(self, server, thread):
if verbose: print("waiting for server")
server.shutdown()
server.server_close()
thread.join()
if verbose: print("done")
@contextmanager
def reaped_server(self, hdlr):
server, thread = self.make_server((support.HOST, 0), hdlr)
try:
yield server
finally:
self.reap_server(server, thread)
@reap_threads
def test_connect(self):
with self.reaped_server(SimpleIMAPHandler) as server:
client = self.imap_class(*server.server_address)
client.shutdown()
@reap_threads
def test_issue5949(self):
class EOFHandler(socketserver.StreamRequestHandler):
def handle(self):
# EOF without sending a complete welcome message.
self.wfile.write(b'* OK')
with self.reaped_server(EOFHandler) as server:
self.assertRaises(imaplib.IMAP4.abort,
self.imap_class, *server.server_address)
@reap_threads
def test_line_termination(self):
class BadNewlineHandler(SimpleIMAPHandler):
def cmd_CAPABILITY(self, tag, args):
self._send(b'* CAPABILITY IMAP4rev1 AUTH\n')
self._send('{} OK CAPABILITY completed\r\n'.format(tag).encode('ASCII'))
with self.reaped_server(BadNewlineHandler) as server:
self.assertRaises(imaplib.IMAP4.abort,
self.imap_class, *server.server_address)
class ThreadedNetworkedTests(BaseThreadedNetworkedTests):
server_class = socketserver.TCPServer
imap_class = imaplib.IMAP4
@unittest.skipUnless(ssl, "SSL not available")
class ThreadedNetworkedTestsSSL(BaseThreadedNetworkedTests):
server_class = SecureTCPServer
imap_class = IMAP4_SSL
class RemoteIMAPTest(unittest.TestCase):
host = 'cyrus.andrew.cmu.edu'
port = 143
username = 'anonymous'
password = 'pass'
imap_class = imaplib.IMAP4
def setUp(self):
with transient_internet(self.host):
self.server = self.imap_class(self.host, self.port)
def tearDown(self):
if self.server is not None:
with transient_internet(self.host):
self.server.logout()
def test_logincapa(self):
with transient_internet(self.host):
for cap in self.server.capabilities:
self.assertIsInstance(cap, str)
self.assertTrue('LOGINDISABLED' in self.server.capabilities)
self.assertTrue('AUTH=ANONYMOUS' in self.server.capabilities)
rs = self.server.login(self.username, self.password)
self.assertEqual(rs[0], 'OK')
def test_logout(self):
with transient_internet(self.host):
rs = self.server.logout()
self.server = None
self.assertEqual(rs[0], 'BYE')
@unittest.skipUnless(ssl, "SSL not available")
class RemoteIMAP_STARTTLSTest(RemoteIMAPTest):
def setUp(self):
super().setUp()
with transient_internet(self.host):
rs = self.server.starttls()
self.assertEqual(rs[0], 'OK')
def test_logincapa(self):
for cap in self.server.capabilities:
self.assertIsInstance(cap, str)
self.assertFalse('LOGINDISABLED' in self.server.capabilities)
@unittest.skipUnless(ssl, "SSL not available")
class RemoteIMAP_SSLTest(RemoteIMAPTest):
port = 993
imap_class = IMAP4_SSL
def test_logincapa(self):
for cap in self.server.capabilities:
self.assertIsInstance(cap, str)
self.assertFalse('LOGINDISABLED' in self.server.capabilities)
self.assertTrue('AUTH=PLAIN' in self.server.capabilities)
def test_main():
tests = [TestImaplib]
if support.is_resource_enabled('network'):
if ssl:
global CERTFILE
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir,
"keycert.pem")
if not os.path.exists(CERTFILE):
raise support.TestFailed("Can't read certificate files!")
tests.extend([
ThreadedNetworkedTests, ThreadedNetworkedTestsSSL,
RemoteIMAPTest, RemoteIMAP_SSLTest, RemoteIMAP_STARTTLSTest,
])
support.run_unittest(*tests)
if __name__ == "__main__":
support.use_resources = ['network']
test_main()
|
| jennolsen84/PyTables | refs/heads/develop | bench/search-bench-plot.py | 13 |
from __future__ import print_function
import tables
from pylab import *
def get_values(filename, complib=''):
f = tables.open_file(filename)
nrows = f.root.small.create_best.cols.nrows[:]
corrected_sizes = nrows / 10. ** 6
if mb_units:
corrected_sizes = 16 * nrows / 10. ** 6
if insert:
values = corrected_sizes / f.root.small.create_best.cols.tfill[:]
if table_size:
values = f.root.small.create_best.cols.fsize[:] / nrows
if query:
values = corrected_sizes / \
f.root.small.search_best.inkernel.int.cols.time1[:]
if query_cache:
values = corrected_sizes / \
f.root.small.search_best.inkernel.int.cols.time2[:]
f.close()
return nrows, values
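# Worked example of the unit handling above (illustrative numbers, not a measurement):
# for nrows = 1e6 rows filled in tfill = 0.5 s, corrected_sizes = 1e6 / 10**6 = 1,
# so the plot shows 1 / 0.5 = 2 MRows/s; with --MB-units and 16-byte records,
# corrected_sizes = 16 * 1e6 / 10**6 = 16, giving 16 / 0.5 = 32 MB/s.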
def show_plot(plots, yaxis, legends, gtitle):
xlabel('Number of rows')
ylabel(yaxis)
xlim(10 ** 3, 10 ** 8)
title(gtitle)
grid(True)
# legends = [f[f.find('-'):f.index('.out')] for f in filenames]
# legends = [l.replace('-', ' ') for l in legends]
if table_size:
legend([p[0] for p in plots], legends, loc="upper right")
else:
legend([p[0] for p in plots], legends, loc="upper left")
#subplots_adjust(bottom=0.2, top=None, wspace=0.2, hspace=0.2)
if outfile:
savefig(outfile)
else:
show()
if __name__ == '__main__':
import sys
import getopt
usage = """usage: %s [-o file] [-t title] [--insert] [--table-size] [--query] [--query-cache] [--MB-units] files
-o filename for output (only .png and .jpg extensions supported)
-t title of the plot
--insert -- Insert time for table
--table-size -- Size of table
--query -- Time for querying the integer column
--query-cache -- Time for querying the integer (cached)
--MB-units -- Express speed in MB/s instead of MRows/s
\n""" % sys.argv[0]
try:
opts, pargs = getopt.getopt(sys.argv[1:], 'o:t:',
['insert',
'table-size',
'query',
'query-cache',
'MB-units',
])
except:
sys.stderr.write(usage)
sys.exit(0)
progname = sys.argv[0]
args = sys.argv[1:]
# if we pass too few parameters, abort
if len(pargs) < 1:
sys.stderr.write(usage)
sys.exit(0)
# default options
outfile = None
insert = 0
table_size = 0
query = 0
query_cache = 0
mb_units = 0
yaxis = "No axis name"
tit = None
gtitle = "Please set a title!"
# Get the options
for option in opts:
if option[0] == '-o':
outfile = option[1]
elif option[0] == '-t':
tit = option[1]
elif option[0] == '--insert':
insert = 1
yaxis = "MRows/s"
gtitle = "Writing with small (16 bytes) record size"
elif option[0] == '--table-size':
table_size = 1
yaxis = "Bytes/row"
gtitle = ("Disk space taken by a record (original record size: "
"16 bytes)")
elif option[0] == '--query':
query = 1
yaxis = "MRows/s"
gtitle = ("Selecting with small (16 bytes) record size (file not "
"in cache)")
elif option[0] == '--query-cache':
query_cache = 1
yaxis = "MRows/s"
gtitle = ("Selecting with small (16 bytes) record size (file in "
"cache)")
elif option[0] == '--MB-units':
mb_units = 1
filenames = pargs
if mb_units and yaxis == "MRows/s":
yaxis = "MB/s"
if tit:
gtitle = tit
plots = []
legends = []
for filename in filenames:
plegend = filename[filename.find('cl-') + 3:filename.index('.h5')]
plegend = plegend.replace('-', ' ')
xval, yval = get_values(filename, '')
print("Values for %s --> %s, %s" % (filename, xval, yval))
#plots.append(loglog(xval, yval, linewidth=5))
plots.append(semilogx(xval, yval, linewidth=4))
legends.append(plegend)
if 0:  # To insert simulated data, if desired...
xval = [1000, 10000, 100000, 1000000, 10000000,
100000000, 1000000000]
# yval = [0.003, 0.005, 0.02, 0.06, 1.2,
# 40, 210]
yval = [0.0009, 0.0011, 0.0022, 0.005, 0.02,
0.2, 5.6]
plots.append(loglog(xval, yval, linewidth=5))
legends.append("PyTables Std")
show_plot(plots, yaxis, legends, gtitle)
|
| KousikaGanesh/purchaseandInventory | refs/heads/master | openerp/addons/account_budget/report/crossovered_budget_report.py | 22 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp import pooler
from openerp.report import report_sxw
import operator
from openerp import osv
class budget_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(budget_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'funct': self.funct,
'funct_total': self.funct_total,
'time': time,
})
self.context = context
def funct(self, object, form, ids=None, done=None, level=1):
if ids is None:
ids = {}
if not ids:
ids = self.ids
if not done:
done = {}
global tot
tot = {
'theo':0.00,
'pln':0.00,
'prac':0.00,
'perc':0.00
}
result = []
budgets = self.pool.get('crossovered.budget').browse(self.cr, self.uid, [object.id], self.context.copy())
c_b_lines_obj = self.pool.get('crossovered.budget.lines')
acc_analytic_obj = self.pool.get('account.analytic.account')
for budget_id in budgets:
res = {}
budget_lines = []
budget_ids = []
d_from = form['date_from']
d_to = form['date_to']
for line in budget_id.crossovered_budget_line:
budget_ids.append(line.id)
if not budget_ids:
return []
self.cr.execute('SELECT DISTINCT(analytic_account_id) FROM crossovered_budget_lines WHERE id = ANY(%s)',(budget_ids,))
an_ids = self.cr.fetchall()
context = {'wizard_date_from': d_from, 'wizard_date_to': d_to}
for i in range(0, len(an_ids)):
if not an_ids[i][0]:
continue
analytic_name = acc_analytic_obj.browse(self.cr, self.uid, [an_ids[i][0]])
res={
'b_id': '-1',
'a_id': '-1',
'name': analytic_name[0].name,
'status': 1,
'theo': 0.00,
'pln': 0.00,
'prac': 0.00,
'perc': 0.00
}
result.append(res)
line_ids = c_b_lines_obj.search(self.cr, self.uid, [('id', 'in', budget_ids), ('analytic_account_id','=',an_ids[i][0]), ('date_to', '>=', d_from), ('date_from', '<=', d_to)])
line_id = c_b_lines_obj.browse(self.cr, self.uid, line_ids)
tot_theo = tot_pln = tot_prac = tot_perc = 0.00
done_budget = []
for line in line_id:
if line.id in budget_ids:
theo = pract = 0.00
theo = c_b_lines_obj._theo_amt(self.cr, self.uid, [line.id], context)[line.id]
pract = c_b_lines_obj._prac_amt(self.cr, self.uid, [line.id], context)[line.id]
if line.general_budget_id.id in done_budget:
for record in result:
if record['b_id'] == line.general_budget_id.id and record['a_id'] == line.analytic_account_id.id:
record['theo'] += theo
record['pln'] += line.planned_amount
record['prac'] += pract
if record['theo'] != 0.00:
perc = (record['prac'] / record['theo']) * 100
else:
perc = 0.00
record['perc'] = perc
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += perc
else:
if theo != 0.00:
perc = (pract / theo) * 100
else:
perc = 0.00
res1 = {
'a_id': line.analytic_account_id.id,
'b_id': line.general_budget_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': theo,
'pln': line.planned_amount,
'prac': pract,
'perc': perc,
}
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += perc
if form['report'] == 'analytic-full':
result.append(res1)
done_budget.append(line.general_budget_id.id)
else:
if line.general_budget_id.id in done_budget:
continue
else:
res1={
'a_id': line.analytic_account_id.id,
'b_id': line.general_budget_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': 0.00,
'pln': 0.00,
'prac': 0.00,
'perc': 0.00
}
if form['report'] == 'analytic-full':
result.append(res1)
done_budget.append(line.general_budget_id.id)
if tot_theo == 0.00:
tot_perc = 0.00
else:
tot_perc = float(tot_prac / tot_theo) * 100
if form['report'] == 'analytic-full':
result[-(len(done_budget) +1)]['theo'] = tot_theo
tot['theo'] += tot_theo
result[-(len(done_budget) +1)]['pln'] = tot_pln
tot['pln'] += tot_pln
result[-(len(done_budget) +1)]['prac'] = tot_prac
tot['prac'] += tot_prac
result[-(len(done_budget) +1)]['perc'] = tot_perc
else:
result[-1]['theo'] = tot_theo
tot['theo'] += tot_theo
result[-1]['pln'] = tot_pln
tot['pln'] += tot_pln
result[-1]['prac'] = tot_prac
tot['prac'] += tot_prac
result[-1]['perc'] = tot_perc
if tot['theo'] == 0.00:
tot['perc'] = 0.00
else:
tot['perc'] = float(tot['prac'] / tot['theo']) * 100
return result
def funct_total(self, form):
result = []
res = {}
res = {
'tot_theo': tot['theo'],
'tot_pln': tot['pln'],
'tot_prac': tot['prac'],
'tot_perc': tot['perc']
}
result.append(res)
return result
report_sxw.report_sxw('report.crossovered.budget.report', 'crossovered.budget', 'addons/account_budget/report/crossovered_budget_report.rml',parser=budget_report,header='internal')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
| stephaneAG/PengPod700 | refs/heads/master | QtEsrc/qt-everywhere-opensource-src-4.8.5/src/3rdparty/webkit/Source/WebCore/inspector/inline-javascript-imports.py | 62 |
#!/usr/bin/env python
#
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script replaces calls to importScripts with the corresponding script sources
# in the input script file and dumps the result into the output script file.
from cStringIO import StringIO
import os.path
import re
import sys
def main(argv):
if len(argv) < 4:
print('usage: %s inputFile importsDir outputFile' % argv[0])
return 1
inputFileName = argv[1]
importsDir = argv[2]
outputFileName = argv[3]
inputFile = open(inputFileName, 'r')
inputScript = inputFile.read()
inputFile.close()
def replace(match):
importFileName = match.group(1)
fullPath = os.path.join(importsDir, importFileName)
if not os.access(fullPath, os.F_OK):
raise Exception('File %s referenced in %s not found on any source paths, '
'check source tree for consistency' %
(importFileName, inputFileName))
importFile = open(fullPath, 'r')
importScript = importFile.read()
importFile.close()
return importScript
outputScript = re.sub(r'importScripts\([\'"]([^\'"]+)[\'"]\)', replace, inputScript)
outputFile = open(outputFileName, 'w')
outputFile.write(outputScript)
outputFile.close()
# Touch output file directory to make sure that Xcode will copy
# modified resource files.
if sys.platform == 'darwin':
outputDirName = os.path.dirname(outputFileName)
os.utime(outputDirName, None)
if __name__ == '__main__':
sys.exit(main(sys.argv))
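# Minimal, self-contained illustration (not part of the original script) of the
# substitution performed above; the replacement text stands in for the file contents:
#   re.sub(r'importScripts\([\'"]([^\'"]+)[\'"]\)',
#          '/* inlined utils.js */',
#          'importScripts("utils.js"); var x = 1;')
# returns '/* inlined utils.js */; var x = 1;'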
|
| vinhlh/bite-project | refs/heads/master | deps/gdata-python-client/tests/gdata_tests/photos_test.py | 133 |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeffrey Scudder)'
import unittest
from gdata import test_data
import gdata.photos
class AlbumFeedTest(unittest.TestCase):
def setUp(self):
self.album_feed = gdata.photos.AlbumFeedFromString(test_data.ALBUM_FEED)
def testCorrectXmlParsing(self):
self.assert_(self.album_feed.id.text == 'http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1')
self.assert_(self.album_feed.gphoto_id.text == '1')
self.assert_(len(self.album_feed.entry) == 4)
for entry in self.album_feed.entry:
if entry.id.text == 'http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2':
self.assert_(entry.summary.text == 'Blue')
class PhotoFeedTest(unittest.TestCase):
def setUp(self):
self.feed = gdata.photos.PhotoFeedFromString(test_data.ALBUM_FEED)
def testCorrectXmlParsing(self):
for entry in self.feed.entry:
if entry.id.text == 'http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2':
self.assert_(entry.gphoto_id.text == '2')
self.assert_(entry.albumid.text == '1')
self.assert_(entry.exif.flash.text == 'true')
self.assert_(entry.media.title.type == 'plain')
self.assert_(entry.media.title.text == 'Aqua Blue.jpg')
self.assert_(len(entry.media.thumbnail) == 3)
class AnyFeedTest(unittest.TestCase):
def setUp(self):
self.feed = gdata.photos.AnyFeedFromString(test_data.ALBUM_FEED)
def testEntryTypeConversion(self):
for entry in self.feed.entry:
if entry.id.text == 'http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/':
self.assert_(isinstance(entry, gdata.photos.PhotoEntry))
if __name__ == '__main__':
unittest.main()
|
| ageron/tensorflow | refs/heads/master | tensorflow/python/autograph/pyct/transformer_test.py | 4 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for templates module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.platform import test
class TransformerTest(test.TestCase):
def _simple_context(self):
entity_info = transformer.EntityInfo(
source_code=None,
source_file=None,
namespace=None,
arg_values=None,
arg_types=None,
owner_type=None)
return transformer.Context(entity_info)
def test_entity_scope_tracking(self):
class TestTransformer(transformer.Base):
# The choice of node to assign to is arbitrary. Using Assign because it's
# easy to find in the tree.
def visit_Assign(self, node):
anno.setanno(node, 'enclosing_entities', self.enclosing_entities)
return self.generic_visit(node)
# This will show up in the lambda function.
def visit_BinOp(self, node):
anno.setanno(node, 'enclosing_entities', self.enclosing_entities)
return self.generic_visit(node)
tr = TestTransformer(self._simple_context())
def test_function():
a = 0
class TestClass(object):
def test_method(self):
b = 0
def inner_function(x):
c = 0
d = lambda y: (x + y)
return c, d
return b, inner_function
return a, TestClass
node, _ = parser.parse_entity(test_function)
node = tr.visit(node)
test_function_node = node.body[0]
test_class = test_function_node.body[1]
test_method = test_class.body[0]
inner_function = test_method.body[1]
lambda_node = inner_function.body[1].value
a = test_function_node.body[0]
b = test_method.body[0]
c = inner_function.body[0]
lambda_expr = lambda_node.body
self.assertEqual(
(test_function_node,), anno.getanno(a, 'enclosing_entities'))
self.assertEqual((test_function_node, test_class, test_method),
anno.getanno(b, 'enclosing_entities'))
self.assertEqual(
(test_function_node, test_class, test_method, inner_function),
anno.getanno(c, 'enclosing_entities'))
self.assertEqual((test_function_node, test_class, test_method,
inner_function, lambda_node),
anno.getanno(lambda_expr, 'enclosing_entities'))
def assertSameAnno(self, first, second, key):
self.assertIs(anno.getanno(first, key), anno.getanno(second, key))
def assertDifferentAnno(self, first, second, key):
self.assertIsNot(anno.getanno(first, key), anno.getanno(second, key))
def test_state_tracking(self):
class LoopState(object):
pass
class CondState(object):
pass
class TestTransformer(transformer.Base):
def visit(self, node):
anno.setanno(node, 'loop_state', self.state[LoopState].value)
anno.setanno(node, 'cond_state', self.state[CondState].value)
return super(TestTransformer, self).visit(node)
def visit_While(self, node):
self.state[LoopState].enter()
node = self.generic_visit(node)
self.state[LoopState].exit()
return node
def visit_If(self, node):
self.state[CondState].enter()
node = self.generic_visit(node)
self.state[CondState].exit()
return node
tr = TestTransformer(self._simple_context())
def test_function(a):
a = 1
while a:
_ = 'a'
if a > 2:
_ = 'b'
while True:
raise '1'
if a > 3:
_ = 'c'
while True:
raise '1'
node, _ = parser.parse_entity(test_function)
node = tr.visit(node)
fn_body = node.body[0].body
outer_while_body = fn_body[1].body
self.assertSameAnno(fn_body[0], outer_while_body[0], 'cond_state')
self.assertDifferentAnno(fn_body[0], outer_while_body[0], 'loop_state')
first_if_body = outer_while_body[1].body
self.assertDifferentAnno(outer_while_body[0], first_if_body[0],
'cond_state')
self.assertSameAnno(outer_while_body[0], first_if_body[0], 'loop_state')
first_inner_while_body = first_if_body[1].body
self.assertSameAnno(first_if_body[0], first_inner_while_body[0],
'cond_state')
self.assertDifferentAnno(first_if_body[0], first_inner_while_body[0],
'loop_state')
second_if_body = outer_while_body[2].body
self.assertDifferentAnno(first_if_body[0], second_if_body[0], 'cond_state')
self.assertSameAnno(first_if_body[0], second_if_body[0], 'loop_state')
second_inner_while_body = second_if_body[1].body
self.assertDifferentAnno(first_inner_while_body[0],
second_inner_while_body[0], 'cond_state')
self.assertDifferentAnno(first_inner_while_body[0],
second_inner_while_body[0], 'loop_state')
def test_local_scope_info_stack(self):
class TestTransformer(transformer.Base):
# Extract all string constants from the block.
def visit_Str(self, node):
self.set_local('string', self.get_local('string', default='') + node.s)
return self.generic_visit(node)
def _annotate_result(self, node):
self.enter_local_scope()
node = self.generic_visit(node)
anno.setanno(node, 'test', self.get_local('string'))
self.exit_local_scope()
return node
def visit_While(self, node):
return self._annotate_result(node)
def visit_For(self, node):
return self._annotate_result(node)
tr = TestTransformer(self._simple_context())
def test_function(a):
"""Docstring."""
assert a == 'This should not be counted'
for i in range(3):
_ = 'a'
if i > 2:
return 'b'
else:
_ = 'c'
while True:
raise '1'
return 'nor this'
node, _ = parser.parse_entity(test_function)
node = tr.visit(node)
for_node = node.body[0].body[2]
while_node = for_node.body[1].orelse[1]
self.assertFalse(anno.hasanno(for_node, 'string'))
self.assertEqual('abc', anno.getanno(for_node, 'test'))
self.assertFalse(anno.hasanno(while_node, 'string'))
self.assertEqual('1', anno.getanno(while_node, 'test'))
def test_local_scope_info_stack_checks_integrity(self):
class TestTransformer(transformer.Base):
def visit_If(self, node):
self.enter_local_scope()
return self.generic_visit(node)
def visit_For(self, node):
node = self.generic_visit(node)
self.exit_local_scope()
return node
tr = TestTransformer(self._simple_context())
def no_exit(a):
if a > 0:
print(a)
return None
node, _ = parser.parse_entity(no_exit)
with self.assertRaises(AssertionError):
tr.visit(node)
def no_entry(a):
for _ in a:
print(a)
node, _ = parser.parse_entity(no_entry)
with self.assertRaises(AssertionError):
tr.visit(node)
def test_visit_block_postprocessing(self):
class TestTransformer(transformer.Base):
def _process_body_item(self, node):
if isinstance(node, gast.Assign) and (node.value.id == 'y'):
if_node = gast.If(gast.Name('x', gast.Load(), None), [node], [])
return if_node, if_node.body
return node, None
def visit_FunctionDef(self, node):
node.body = self.visit_block(
node.body, after_visit=self._process_body_item)
return node
def test_function(x, y):
z = x
z = y
return z
tr = TestTransformer(self._simple_context())
node, _ = parser.parse_entity(test_function)
node = tr.visit(node)
node = node.body[0]
self.assertEqual(len(node.body), 2)
self.assertTrue(isinstance(node.body[0], gast.Assign))
self.assertTrue(isinstance(node.body[1], gast.If))
self.assertTrue(isinstance(node.body[1].body[0], gast.Assign))
self.assertTrue(isinstance(node.body[1].body[1], gast.Return))
def test_robust_error_on_list_visit(self):
class BrokenTransformer(transformer.Base):
def visit_If(self, node):
# This is broken because visit expects a single node, not a list, and
# the body of an if is a list.
# Importantly, the default error handling in visit also expects a single
# node. Therefore, mistakes like this need to trigger a type error
# before the visit called here installs its error handler.
# That type error can then be caught by the enclosing call to visit,
# and correctly blame the If node.
self.visit(node.body)
return node
def test_function(x):
if x > 0:
return x
tr = BrokenTransformer(self._simple_context())
node, _ = parser.parse_entity(test_function)
with self.assertRaises(ValueError) as cm:
node = tr.visit(node)
obtained_message = str(cm.exception)
expected_message = r'expected "ast.AST", got "\<(type|class) \'list\'\>"'
self.assertRegexpMatches(obtained_message, expected_message)
def test_robust_error_on_ast_corruption(self):
# A child class should not be able to be so broken that it causes the error
# handling in `transformer.Base` to raise an exception. Why not? Because
# then the original error location is dropped, and an error handler higher
# up in the call stack gives misleading information.
# Here we test that the error handling in `visit` completes, and blames the
# correct original exception, even if the AST gets corrupted.
class NotANode(object):
pass
class BrokenTransformer(transformer.Base):
def visit_If(self, node):
node.body = NotANode()
raise ValueError('I blew up')
def test_function(x):
if x > 0:
return x
tr = BrokenTransformer(self._simple_context())
node, _ = parser.parse_entity(test_function)
with self.assertRaises(ValueError) as cm:
node = tr.visit(node)
obtained_message = str(cm.exception)
# The message should reference the exception actually raised, not anything
# from the exception handler.
expected_substring = 'I blew up'
self.assertTrue(expected_substring in obtained_message, obtained_message)
if __name__ == '__main__':
test.main()
|
| mm112287/2015cda_g8 | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/webbrowser.py | 735 |
from browser import window
__all__ = ["Error", "open", "open_new", "open_new_tab"]
class Error(Exception):
pass
_target = { 0: '', 1: '_blank', 2: '_new' } # hack...
def open(url, new=0, autoraise=True):
"""
whether a new window or a new tab is used is not controllable
on the client side; autoraise is not available.
"""
if window.open(url, _target[new]):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
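# Usage sketch (assumes a Brython/browser environment where `window` is available):
# open("https://example.org")          # open with an empty target
# open_new("https://example.org")      # target '_blank'
# open_new_tab("https://example.org")  # target '_new'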
|
| rwl/PyCIM | refs/heads/master | CIM15/IEC61970/Informative/InfAssets/PowerRating.py | 1 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class PowerRating(IdentifiedObject):
"""There are often stages of power which are associated with stages of cooling. For instance, a transformer may be rated 121kV on the primary, 15kV on the secondary and 4kV on the tertiary winding. These are voltage ratings and the power ratings are generally the same for all three windings and independent of the voltage ratings, there are instances where the tertiary may have a lower power rating. For example, for three stages, the power rating may be 15/20/25 MVA and the cooling is OA/FA/FOA. The 15 MVA rating goes with the OA cooling (Oil and Air cooling). This is called the self cooled rating as there are no external cooling enhancements. The 20 MVA rating goes with the FA cooling (Forced Air cooling), this means that when the fans are running and thus enhancing the cooling characteristics, the transformer can operate at a power level of 20 MVA. The 25 MVA rating goes with the FOA cooling (Forced Oil and Air cooling), this means that when the fans and pumps are running and thus enhancing the cooling characteristics even more than before, the transformer can operate at a power level of 25 MVA. This 15/20/25 MVA does not state how the power is split between the various windings. It may be 25 MVA input on the primary, 25 MVA output on the secondary and 0 MVA output on the tertiary. It may also operate at 25 MVA input on the primary, 17 MVA output on the secondary and 8 MVA output on the tertiary.There are often stages of power which are associated with stages of cooling. For instance, a transformer may be rated 121kV on the primary, 15kV on the secondary and 4kV on the tertiary winding. These are voltage ratings and the power ratings are generally the same for all three windings and independent of the voltage ratings, there are instances where the tertiary may have a lower power rating. For example, for three stages, the power rating may be 15/20/25 MVA and the cooling is OA/FA/FOA. The 15 MVA rating goes with the OA cooling (Oil and Air cooling). This is called the self cooled rating as there are no external cooling enhancements. The 20 MVA rating goes with the FA cooling (Forced Air cooling), this means that when the fans are running and thus enhancing the cooling characteristics, the transformer can operate at a power level of 20 MVA. The 25 MVA rating goes with the FOA cooling (Forced Oil and Air cooling), this means that when the fans and pumps are running and thus enhancing the cooling characteristics even more than before, the transformer can operate at a power level of 25 MVA. This 15/20/25 MVA does not state how the power is split between the various windings. It may be 25 MVA input on the primary, 25 MVA output on the secondary and 0 MVA output on the tertiary. It may also operate at 25 MVA input on the primary, 17 MVA output on the secondary and 8 MVA output on the tertiary.
"""
def __init__(self, stage=0, powerRating=0.0, coolingKind="forcedAir", TransformerAssets=None, *args, **kw_args):
"""Initialises a new 'PowerRating' instance.
@param stage: Stage of cooling and associated power rating.
@param powerRating: The power rating associated with type of cooling specified for this stage.
@param coolingKind: Kind of cooling system. Values are: "forcedAir", "selfCooling", "forcedOilAndAir", "other"
@param TransformerAssets:
"""
#: Stage of cooling and associated power rating.
self.stage = stage
#: The power rating associated with type of cooling specified for this stage.
self.powerRating = powerRating
#: Kind of cooling system. Values are: "forcedAir", "selfCooling", "forcedOilAndAir", "other"
self.coolingKind = coolingKind
self._TransformerAssets = []
self.TransformerAssets = [] if TransformerAssets is None else TransformerAssets
super(PowerRating, self).__init__(*args, **kw_args)
_attrs = ["stage", "powerRating", "coolingKind"]
_attr_types = {"stage": int, "powerRating": float, "coolingKind": str}
_defaults = {"stage": 0, "powerRating": 0.0, "coolingKind": "forcedAir"}
_enums = {"coolingKind": "CoolingKind"}
_refs = ["TransformerAssets"]
_many_refs = ["TransformerAssets"]
def getTransformerAssets(self):
return self._TransformerAssets
def setTransformerAssets(self, value):
for p in self._TransformerAssets:
filtered = [q for q in p.PowerRatings if q != self]
self._TransformerAssets._PowerRatings = filtered
for r in value:
if self not in r._PowerRatings:
r._PowerRatings.append(self)
self._TransformerAssets = value
TransformerAssets = property(getTransformerAssets, setTransformerAssets)
def addTransformerAssets(self, *TransformerAssets):
for obj in TransformerAssets:
if self not in obj._PowerRatings:
obj._PowerRatings.append(self)
self._TransformerAssets.append(obj)
def removeTransformerAssets(self, *TransformerAssets):
for obj in TransformerAssets:
if self in obj._PowerRatings:
obj._PowerRatings.remove(self)
self._TransformerAssets.remove(obj)
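# Illustrative construction (not part of the original module), matching the
# 15/20/25 MVA OA/FA/FOA example in the class docstring; this is the forced-air stage:
# PowerRating(stage=2, powerRating=20.0, coolingKind="forcedAir")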
|
| xitrum-framework/FrameworkBenchmarks | refs/heads/master | toolset/benchmark/test_types/update_type.py | 22 |
from benchmark.test_types.framework_test_type import FrameworkTestType
from benchmark.test_types.verifications import verify_query_cases
class UpdateTestType(FrameworkTestType):
def __init__(self):
kwargs = {
'name': 'update',
'accept_header': self.accept('json'),
'requires_db': True,
'args': ['update_url']
}
FrameworkTestType.__init__(self, **kwargs)
def get_url(self):
return self.update_url
def verify(self, base_url):
'''Validates that the response is a JSON array of
the proper length, that each JSON object in the array
has the keys 'id' and 'randomNumber', and that these
keys map to integers. Matching is case-insensitive and
quoting style is ignored.
'''
url = base_url + self.update_url
cases = [
('2', 'fail'),
('0', 'warn'),
('foo', 'warn'),
('501', 'warn'),
('', 'warn')
]
problems = verify_query_cases(self, cases, url)
if len(problems) == 0:
return [('pass', '', url + case) for (case, _) in cases]
else:
return problems
|
| sylvestre/bedrock | refs/heads/master | bedrock/base/models.py | 9 |
from django.db import models
class ConfigValue(models.Model):
name = models.CharField(max_length=100, db_index=True, unique=True)
value = models.CharField(max_length=200)
class Meta:
app_label = 'base'
def __str__(self):
return '%s=%s' % (self.name, self.value)
def get_config_dict():
return {c.name: c.value for c in ConfigValue.objects.all()}
|
| ilmir-k/website-addons | refs/heads/8.0 | website_proposal/controllers/__init__.py | 468 |
# -*- coding: utf-8 -*-
from . import main
|
| vincent-psarga/job-sensors | refs/heads/master | src/jobsensors/tests/test_response.py | 1 |
import unittest
from mock import Mock
import urllib2
from time import sleep
from db.utils import setup_db, drop_db
from jobs import response
class MockResponse(object):
def __init__(self, code):
self.code = code
def getcode(self):
return self.code
class ResponseTest(unittest.TestCase):
def setUp(self):
setup_db()
def tearDown(self):
drop_db()
def test_check(self):
sut = response.Response(1, 'My site', 'http://whatever')
sut.open_page = Mock()
result = sut.check()
self.assertEqual(result['author'], '')
self.assertEqual(result['stable'], True)
# This one is pretty hard to really test now,
# see test_time_computing below
self.assertTrue(result['value'] < 0.1)
def test_open_page(self):
# As tests are run on Travis, it should work ... I guess
sut = response.Response(1, 'My site', 'http://travis-ci.org')
page = sut.open_page()
self.assertIsInstance(page, urllib2.addinfourl)
sut = response.Response(2, 'My site', 'http://whatever')
with self.assertRaises(Exception):
sut.open_page()
sut = response.Response(3, 'My site', 'http://google.com')
response.urlopen = Mock(return_value=MockResponse(404))
# Responses other than 200 are also considered errors, as we
# usually check the homepage.
with self.assertRaises(Exception):
sut.open_page()
def test_get_time(self):
def sleep_two():
sleep(2)
return 'Slept two seconds'
sut = response.Response(2, 'My site', 'http://whatever')
result = sut.get_time(sleep_two)
self.assertEqual(int(result['time']), 2)
self.assertEqual(result['result'], 'Slept two seconds')
def test_time_computing(self):
sut = response.Response(2, 'My site', 'http://whatever', 1)
sut.get_time = Mock(return_value={'time': 3.14})
self.assertEqual(sut.check()['value'], 3.14)
sut.count = 10
self.assertEqual(sut.check()['value'], 3.14)
def mock_get_time(func):
if not hasattr(self, 'call_index'):
self.call_index = 0
self.call_index += 1
return {'time': float(self.call_index)}
sut._get_time = sut.get_time
sut.get_time = mock_get_time
# 5.5 is the average of 1, 2, 3 ... 10
self.assertEqual(sut.check()['value'], 5.5)
sut.get_time = sut._get_time
|
| b4ldr/atlas-traceroute-to-bgp | refs/heads/master | main.py | 1 |
#!/usr/bin/env python
import logging
from trace2bgp.atlas import Asn, Asns
from trace2bgp.utils import get_sagan_objects, get_cousteau_object
from prettytable import PrettyTable
from argparse import ArgumentParser
from ripe.atlas.cousteau import AtlasLatestRequest
def get_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='count' )
parser.add_argument('--pyasn-file')
parser.add_argument('msm_id')
return parser.parse_args()
def set_log_level(args_level):
log_level = logging.ERROR
lib_log_level = logging.ERROR
if args_level == 1:
log_level = logging.WARN
elif args_level == 2:
log_level = logging.INFO
lib_log_level = logging.WARN
elif args_level == 3:
log_level = logging.DEBUG
elif args_level > 3:
lib_log_level = logging.DEBUG
logging.basicConfig(level=log_level)
logging.getLogger('requests').setLevel(lib_log_level)
logging.getLogger('urllib3').setLevel(lib_log_level)
def report(headings, entries, sortby=None, reversesort=False):
if len(entries) == 0:
return '\nNo Data'
table = PrettyTable(headings)
for heading in headings:
table.align[heading] = 'l'
for entry in entries:
table.add_row(entry)
if sortby:
return table.get_string(sortby=sortby, reversesort=reversesort)
else:
return table.get_string()
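# Minimal usage sketch of report() with made-up data (64496 and 64511 are documentation ASNs):
# print report(['ASN', 'unique paths'], [[64496, 3], [64511, 1]], sortby='unique paths')
# renders a left-aligned PrettyTable sorted by the 'unique paths' column.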
def reachability_report(asns):
headings = ['ASN', 'unique paths', 'Min', 'Max', 'Transit ASNs', 'Downstream ASNs']
sortby = 'Downstream ASNs'
entries = []
entries = [[asn.id, len(asn.paths), min(map(len, asn.paths)),
max(map(len, asn.paths)), len(asn.transit_asns), len(asn.downstream_asns)]
for asn in asns]
return report(headings, entries, sortby)
def main():
args = get_args()
set_log_level(args.verbose)
cousteau_object = get_cousteau_object(args.msm_id)
sagan_object = get_sagan_objects(cousteau_object)
asns = Asns(sagan_object, args.pyasn_file)
print reachability_report(asns.asns)
print '{} ASNs analysed'.format(len(asns))
if __name__ == '__main__':
main()
|
| cauchycui/scikit-learn | refs/heads/master | benchmarks/bench_plot_approximate_neighbors.py | 85 |
"""
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, accuracy of LSHForest queries are measured for various
hyper-parameters and index sizes.
Second, speed up of LSHForest queries compared to brute force
method in exact nearest neighbors is measures for the
aforementioned settings. In general, speed up is increasing as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
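# Toy illustration of the accuracy metric above (not a benchmark result): for a single
# query, np.in1d([1, 2, 3], [2, 3, 4]).mean() == 2/3, i.e. two of the three approximate
# neighbors are also among the exact neighbors.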
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
p1 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[0])
p2 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[1])
p3 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[2])
labels = ['n_estimators=' + str(params_list[0]['n_estimators']) +
', n_candidates=' + str(params_list[0]['n_candidates']),
'n_estimators=' + str(params_list[1]['n_estimators']) +
', n_candidates=' + str(params_list[1]['n_candidates']),
'n_estimators=' + str(params_list[2]['n_estimators']) +
', n_candidates=' + str(params_list[2]['n_candidates'])]
# Plot precision
plt.figure()
plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
|
| kans/birgo | refs/heads/master | deps/breakpad/src/tools/gyp/test/actions-subdir/src/subdir/make-subdir-file.py | 489 |
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
contents = 'Hello from make-subdir-file.py\n'
open(sys.argv[1], 'wb').write(contents)
|
| edgarcosta/endomorphisms | refs/heads/master | endomorphisms/bounds/Genus2Factors.py | 1 |
"""
* Bound functionality
*
* Copyright (C) 2016-2017
* Edgar Costa (edgarcosta@math.dartmouth.edu)
* Davide Lombardo (davide.lombardo@math.u-psud.fr)
* Jeroen Sijsling (jeroen.sijsling@uni-ulm.de)
*
* See LICENSE.txt for license details.
"""
from sage.all import is_prime
def Genus2FactorTwistedLPolys(LPolys) : # the input LPolys need to be in genus 3!
Genus2LPolys = [0 for k in range(0,maxP)]
for p in range (2,maxP) :
if is_prime(p) and LPolys[p] != 0:
q = LPolys[p]
# print q
# print "Twisting the L-poly"
q = twistPolynomial(q, extensionBounds[3])
# print "Prime", p, "Polynomial", q;
pieces = q.factor()
for piece in pieces :
if piece[0].degree() == 4 :
Genus2LPolys[p] = piece[0];
return Genus2LPolys
|
| tanmaykm/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/tests/test_library_content.py | 39 |
# -*- coding: utf-8 -*-
"""
Basic unit tests for LibraryContentModule
Higher-level tests are in `cms/djangoapps/contentstore/tests/test_libraries.py`.
"""
from bson.objectid import ObjectId
from mock import Mock, patch
from xblock.fragment import Fragment
from xblock.runtime import Runtime as VanillaRuntime
from xmodule.library_content_module import ANY_CAPA_TYPE_VALUE, LibraryContentDescriptor
from xmodule.library_tools import LibraryToolsService
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import LibraryFactory, CourseFactory
from xmodule.modulestore.tests.utils import MixedSplitTestCase
from xmodule.tests import get_test_system
from xmodule.validation import StudioValidationMessage
from xmodule.x_module import AUTHOR_VIEW
from search.search_engine_base import SearchEngine
dummy_render = lambda block, _: Fragment(block.data) # pylint: disable=invalid-name
class LibraryContentTest(MixedSplitTestCase):
"""
Base class for tests of LibraryContentModule (library_content_module.py)
"""
def setUp(self):
super(LibraryContentTest, self).setUp()
self.tools = LibraryToolsService(self.store)
self.library = LibraryFactory.create(modulestore=self.store)
self.lib_blocks = [
self.make_block("html", self.library, data="Hello world from block {}".format(i))
for i in range(1, 5)
]
self.course = CourseFactory.create(modulestore=self.store)
self.chapter = self.make_block("chapter", self.course)
self.sequential = self.make_block("sequential", self.chapter)
self.vertical = self.make_block("vertical", self.sequential)
self.lc_block = self.make_block(
"library_content",
self.vertical,
max_count=1,
source_library_id=unicode(self.library.location.library_key)
)
def _bind_course_module(self, module):
"""
Bind a module (part of self.course) so we can access student-specific data.
"""
module_system = get_test_system(course_id=module.location.course_key)
module_system.descriptor_runtime = module.runtime._descriptor_system # pylint: disable=protected-access
module_system._services['library_tools'] = self.tools # pylint: disable=protected-access
def get_module(descriptor):
"""Mocks module_system get_module function"""
sub_module_system = get_test_system(course_id=module.location.course_key)
sub_module_system.get_module = get_module
sub_module_system.descriptor_runtime = descriptor._runtime # pylint: disable=protected-access
descriptor.bind_for_student(sub_module_system, self.user_id)
return descriptor
module_system.get_module = get_module
module.xmodule_runtime = module_system
class LibraryContentModuleTestMixin(object):
"""
Basic unit tests for LibraryContentModule
"""
problem_types = [
["multiplechoiceresponse"], ["optionresponse"], ["optionresponse", "coderesponse"],
["coderesponse", "optionresponse"]
]
problem_type_lookup = {}
def _get_capa_problem_type_xml(self, *args):
""" Helper function to create empty CAPA problem definition """
problem = "<problem>"
for problem_type in args:
problem += "<{problem_type}></{problem_type}>".format(problem_type=problem_type)
problem += "</problem>"
return problem
def _create_capa_problems(self):
"""
Helper function to create a set of capa problems to test against.
Creates four blocks total.
"""
self.problem_type_lookup = {}
for problem_type in self.problem_types:
block = self.make_block("problem", self.library, data=self._get_capa_problem_type_xml(*problem_type))
self.problem_type_lookup[block.location] = problem_type
def test_lib_content_block(self):
"""
Test that blocks from a library are copied and added as children
"""
# Check that the LibraryContent block has no children initially
# Normally the children get added when the "source_libraries" setting
# is updated, but the way we do it through a factory doesn't do that.
self.assertEqual(len(self.lc_block.children), 0)
# Update the LibraryContent module:
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
# Check that all blocks from the library are now children of the block:
self.assertEqual(len(self.lc_block.children), len(self.lib_blocks))
def test_children_seen_by_a_user(self):
"""
Test that each student sees only one block as a child of the LibraryContent block.
"""
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
self._bind_course_module(self.lc_block)
# Make sure the runtime knows that the block's children vary per-user:
self.assertTrue(self.lc_block.has_dynamic_children())
self.assertEqual(len(self.lc_block.children), len(self.lib_blocks))
# Check how many children each user will see:
self.assertEqual(len(self.lc_block.get_child_descriptors()), 1)
# Check that get_content_titles() doesn't return titles for hidden/unused children
self.assertEqual(len(self.lc_block.get_content_titles()), 1)
def test_validation_of_course_libraries(self):
"""
Test that the validation method of LibraryContent blocks can validate
the source_library setting.
"""
# When source_library_id is blank, the validation summary should say this block needs to be configured:
self.lc_block.source_library_id = ""
result = self.lc_block.validate()
self.assertFalse(result) # Validation fails due to at least one warning/message
self.assertTrue(result.summary)
self.assertEqual(StudioValidationMessage.NOT_CONFIGURED, result.summary.type)
# When source_library_id references a non-existent library, we should get an error:
self.lc_block.source_library_id = "library-v1:BAD+WOLF"
result = self.lc_block.validate()
self.assertFalse(result) # Validation fails due to at least one warning/message
self.assertTrue(result.summary)
self.assertEqual(StudioValidationMessage.ERROR, result.summary.type)
self.assertIn("invalid", result.summary.text)
# When source_library_id is set but the block needs to be updated, the summary should say so:
self.lc_block.source_library_id = unicode(self.library.location.library_key)
result = self.lc_block.validate()
self.assertFalse(result) # Validation fails due to at least one warning/message
self.assertTrue(result.summary)
self.assertEqual(StudioValidationMessage.WARNING, result.summary.type)
self.assertIn("out of date", result.summary.text)
# Now if we update the block, all validation should pass:
self.lc_block.refresh_children()
self.assertTrue(self.lc_block.validate())
def test_validation_of_matching_blocks(self):
"""
Test that the validation method of LibraryContent blocks can warn
the user about problems with other settings (max_count and capa_type).
"""
# Set max_count to higher value than exists in library
self.lc_block.max_count = 50
# In the normal studio editing process, editor_saved() calls refresh_children at this point
self.lc_block.refresh_children()
result = self.lc_block.validate()
self.assertFalse(result) # Validation fails due to at least one warning/message
self.assertTrue(result.summary)
self.assertEqual(StudioValidationMessage.WARNING, result.summary.type)
self.assertIn("only 4 matching problems", result.summary.text)
# Add some capa problems so we can check problem type validation messages
self.lc_block.max_count = 1
self._create_capa_problems()
self.lc_block.refresh_children()
self.assertTrue(self.lc_block.validate())
# Existing problem type should pass validation
self.lc_block.max_count = 1
self.lc_block.capa_type = 'multiplechoiceresponse'
self.lc_block.refresh_children()
self.assertTrue(self.lc_block.validate())
        # ... unless more blocks are requested than exist in the library
self.lc_block.max_count = 10
self.lc_block.capa_type = 'multiplechoiceresponse'
self.lc_block.refresh_children()
result = self.lc_block.validate()
self.assertFalse(result) # Validation fails due to at least one warning/message
self.assertTrue(result.summary)
self.assertEqual(StudioValidationMessage.WARNING, result.summary.type)
self.assertIn("only 1 matching problem", result.summary.text)
# Missing problem type should always fail validation
self.lc_block.max_count = 1
self.lc_block.capa_type = 'customresponse'
self.lc_block.refresh_children()
result = self.lc_block.validate()
self.assertFalse(result) # Validation fails due to at least one warning/message
self.assertTrue(result.summary)
self.assertEqual(StudioValidationMessage.WARNING, result.summary.type)
self.assertIn("no matching problem types", result.summary.text)
def test_capa_type_filtering(self):
"""
Test that the capa type filter is actually filtering children
"""
self._create_capa_problems()
self.assertEqual(len(self.lc_block.children), 0) # precondition check
self.lc_block.capa_type = "multiplechoiceresponse"
self.lc_block.refresh_children()
self.assertEqual(len(self.lc_block.children), 1)
self.lc_block.capa_type = "optionresponse"
self.lc_block.refresh_children()
self.assertEqual(len(self.lc_block.children), 3)
self.lc_block.capa_type = "coderesponse"
self.lc_block.refresh_children()
self.assertEqual(len(self.lc_block.children), 2)
self.lc_block.capa_type = "customresponse"
self.lc_block.refresh_children()
self.assertEqual(len(self.lc_block.children), 0)
self.lc_block.capa_type = ANY_CAPA_TYPE_VALUE
self.lc_block.refresh_children()
self.assertEqual(len(self.lc_block.children), len(self.lib_blocks) + 4)
def test_non_editable_settings(self):
"""
Test the settings that are marked as "non-editable".
"""
non_editable_metadata_fields = self.lc_block.non_editable_metadata_fields
self.assertIn(LibraryContentDescriptor.mode, non_editable_metadata_fields)
self.assertNotIn(LibraryContentDescriptor.display_name, non_editable_metadata_fields)
@patch('xmodule.library_tools.SearchEngine.get_search_engine', Mock(return_value=None, autospec=True))
class TestLibraryContentModuleNoSearchIndex(LibraryContentModuleTestMixin, LibraryContentTest):
"""
Tests for library container when no search index is available.
Tests fallback low-level CAPA problem introspection
"""
pass
search_index_mock = Mock(spec=SearchEngine) # pylint: disable=invalid-name
@patch('xmodule.library_tools.SearchEngine.get_search_engine', Mock(return_value=search_index_mock, autospec=True))
class TestLibraryContentModuleWithSearchIndex(LibraryContentModuleTestMixin, LibraryContentTest):
"""
Tests for library container with mocked search engine response.
"""
def _get_search_response(self, field_dictionary=None):
""" Mocks search response as returned by search engine """
target_type = field_dictionary.get('problem_types')
matched_block_locations = [
key for key, problem_types in
self.problem_type_lookup.items() if target_type in problem_types
]
return {
'results': [
{'data': {'id': str(location)}} for location in matched_block_locations
]
}
def setUp(self):
""" Sets up search engine mock """
super(TestLibraryContentModuleWithSearchIndex, self).setUp()
search_index_mock.search = Mock(side_effect=self._get_search_response)
@patch(
'xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.render', VanillaRuntime.render
)
@patch('xmodule.html_module.HtmlModule.author_view', dummy_render, create=True)
@patch('xmodule.x_module.DescriptorSystem.applicable_aside_types', lambda self, block: [])
class TestLibraryContentRender(LibraryContentTest):
"""
Rendering unit tests for LibraryContentModule
"""
def test_preview_view(self):
""" Test preview view rendering """
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
self.assertEqual(len(self.lc_block.children), len(self.lib_blocks))
self._bind_course_module(self.lc_block)
rendered = self.lc_block.render(AUTHOR_VIEW, {'root_xblock': self.lc_block})
self.assertIn("Hello world from block 1", rendered.content)
def test_author_view(self):
""" Test author view rendering """
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
self.assertEqual(len(self.lc_block.children), len(self.lib_blocks))
self._bind_course_module(self.lc_block)
rendered = self.lc_block.render(AUTHOR_VIEW, {})
self.assertEqual("", rendered.content) # content should be empty
self.assertEqual("LibraryContentAuthorView", rendered.js_init_fn) # but some js initialization should happen
class TestLibraryContentAnalytics(LibraryContentTest):
"""
Test analytics features of LibraryContentModule
"""
def setUp(self):
super(TestLibraryContentAnalytics, self).setUp()
self.publisher = Mock()
self.lc_block.refresh_children()
self.lc_block = self.store.get_item(self.lc_block.location)
self._bind_course_module(self.lc_block)
self.lc_block.xmodule_runtime.publish = self.publisher
def _assert_event_was_published(self, event_type):
"""
Check that a LibraryContentModule analytics event was published by self.lc_block.
"""
self.assertTrue(self.publisher.called)
        self.assertEqual(len(self.publisher.call_args[0]), 3)
_, event_name, event_data = self.publisher.call_args[0]
self.assertEqual(event_name, "edx.librarycontentblock.content.{}".format(event_type))
self.assertEqual(event_data["location"], unicode(self.lc_block.location))
return event_data
def test_assigned_event(self):
"""
Test the "assigned" event emitted when a student is assigned specific blocks.
"""
# In the beginning was the lc_block and it assigned one child to the student:
child = self.lc_block.get_child_descriptors()[0]
child_lib_location, child_lib_version = self.store.get_block_original_usage(child.location)
self.assertIsInstance(child_lib_version, ObjectId)
event_data = self._assert_event_was_published("assigned")
block_info = {
"usage_key": unicode(child.location),
"original_usage_key": unicode(child_lib_location),
"original_usage_version": unicode(child_lib_version),
"descendants": [],
}
self.assertEqual(event_data, {
"location": unicode(self.lc_block.location),
"added": [block_info],
"result": [block_info],
"previous_count": 0,
"max_count": 1,
})
self.publisher.reset_mock()
# Now increase max_count so that one more child will be added:
self.lc_block.max_count = 2
        # Clear the cache (only needed because we skip saving/re-loading the block)
        del self.lc_block._xmodule._selected_set  # pylint: disable=protected-access
children = self.lc_block.get_child_descriptors()
self.assertEqual(len(children), 2)
child, new_child = children if children[0].location == child.location else reversed(children)
event_data = self._assert_event_was_published("assigned")
self.assertEqual(event_data["added"][0]["usage_key"], unicode(new_child.location))
self.assertEqual(len(event_data["result"]), 2)
self.assertEqual(event_data["previous_count"], 1)
self.assertEqual(event_data["max_count"], 2)
def test_assigned_event_published(self):
"""
Same as test_assigned_event but uses the published branch
"""
self.store.publish(self.course.location, self.user_id)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.lc_block = self.store.get_item(self.lc_block.location)
self._bind_course_module(self.lc_block)
self.lc_block.xmodule_runtime.publish = self.publisher
self.test_assigned_event()
def test_assigned_descendants(self):
"""
Test the "assigned" event emitted includes descendant block information.
"""
# Replace the blocks in the library with a block that has descendants:
with self.store.bulk_operations(self.library.location.library_key):
self.library.children = []
main_vertical = self.make_block("vertical", self.library)
inner_vertical = self.make_block("vertical", main_vertical)
html_block = self.make_block("html", inner_vertical)
problem_block = self.make_block("problem", inner_vertical)
self.lc_block.refresh_children()
# Reload lc_block and set it up for a student:
self.lc_block = self.store.get_item(self.lc_block.location)
self._bind_course_module(self.lc_block)
self.lc_block.xmodule_runtime.publish = self.publisher
# Get the keys of each of our blocks, as they appear in the course:
course_usage_main_vertical = self.lc_block.children[0]
course_usage_inner_vertical = self.store.get_item(course_usage_main_vertical).children[0]
inner_vertical_in_course = self.store.get_item(course_usage_inner_vertical)
course_usage_html = inner_vertical_in_course.children[0]
course_usage_problem = inner_vertical_in_course.children[1]
# Trigger a publish event:
self.lc_block.get_child_descriptors()
event_data = self._assert_event_was_published("assigned")
for block_list in (event_data["added"], event_data["result"]):
self.assertEqual(len(block_list), 1) # main_vertical is the only root block added, and is the only result.
self.assertEqual(block_list[0]["usage_key"], unicode(course_usage_main_vertical))
# Check that "descendants" is a flat, unordered list of all of main_vertical's descendants:
descendants_expected = (
(inner_vertical.location, course_usage_inner_vertical),
(html_block.location, course_usage_html),
(problem_block.location, course_usage_problem),
)
descendant_data_expected = {}
for lib_key, course_usage_key in descendants_expected:
descendant_data_expected[unicode(course_usage_key)] = {
"usage_key": unicode(course_usage_key),
"original_usage_key": unicode(lib_key),
"original_usage_version": unicode(self.store.get_block_original_usage(course_usage_key)[1]),
}
self.assertEqual(len(block_list[0]["descendants"]), len(descendant_data_expected))
for descendant in block_list[0]["descendants"]:
self.assertEqual(descendant, descendant_data_expected.get(descendant["usage_key"]))
def test_removed_overlimit(self):
"""
Test the "removed" event emitted when we un-assign blocks previously assigned to a student.
        We go from one block assigned to none, because max_count has been decreased.
"""
# Decrease max_count to 1, causing the block to be overlimit:
self.lc_block.get_child_descriptors() # This line is needed in the test environment or the change has no effect
self.publisher.reset_mock() # Clear the "assigned" event that was just published.
self.lc_block.max_count = 0
        # Clear the cache (only needed because we skip saving/re-loading the block)
        del self.lc_block._xmodule._selected_set  # pylint: disable=protected-access
# Check that the event says that one block was removed, leaving no blocks left:
children = self.lc_block.get_child_descriptors()
self.assertEqual(len(children), 0)
event_data = self._assert_event_was_published("removed")
self.assertEqual(len(event_data["removed"]), 1)
self.assertEqual(event_data["result"], [])
self.assertEqual(event_data["reason"], "overlimit")
def test_removed_invalid(self):
"""
Test the "removed" event emitted when we un-assign blocks previously assigned to a student.
        We go from two blocks assigned to one, because the other has been deleted from the library.
"""
# Start by assigning two blocks to the student:
self.lc_block.get_child_descriptors() # This line is needed in the test environment or the change has no effect
self.lc_block.max_count = 2
        # Clear the cache (only needed because we skip saving/re-loading the block)
        del self.lc_block._xmodule._selected_set  # pylint: disable=protected-access
initial_blocks_assigned = self.lc_block.get_child_descriptors()
self.assertEqual(len(initial_blocks_assigned), 2)
self.publisher.reset_mock() # Clear the "assigned" event that was just published.
# Now make sure that one of the assigned blocks will have to be un-assigned.
# To cause an "invalid" event, we delete all blocks from the content library
# except for one of the two already assigned to the student:
keep_block_key = initial_blocks_assigned[0].location
keep_block_lib_usage_key, keep_block_lib_version = self.store.get_block_original_usage(keep_block_key)
self.assertIsNotNone(keep_block_lib_usage_key)
deleted_block_key = initial_blocks_assigned[1].location
self.library.children = [keep_block_lib_usage_key]
self.store.update_item(self.library, self.user_id)
self.lc_block.refresh_children()
        # Clear the cache (only needed because we skip saving/re-loading the block)
        del self.lc_block._xmodule._selected_set  # pylint: disable=protected-access
# Check that the event says that one block was removed, leaving one block left:
children = self.lc_block.get_child_descriptors()
self.assertEqual(len(children), 1)
event_data = self._assert_event_was_published("removed")
self.assertEqual(event_data["removed"], [{
"usage_key": unicode(deleted_block_key),
"original_usage_key": None, # Note: original_usage_key info is sadly unavailable because the block has been
# deleted so that info can no longer be retrieved
"original_usage_version": None,
"descendants": [],
}])
self.assertEqual(event_data["result"], [{
"usage_key": unicode(keep_block_key),
"original_usage_key": unicode(keep_block_lib_usage_key),
"original_usage_version": unicode(keep_block_lib_version),
"descendants": [],
}])
self.assertEqual(event_data["reason"], "invalid")
|
stefan-jonasson/home-assistant
|
refs/heads/dev
|
homeassistant/components/notify/kodi.py
|
4
|
"""
Kodi notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.kodi/
"""
import asyncio
import logging
import aiohttp
import voluptuous as vol
from homeassistant.const import (
ATTR_ICON, CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
CONF_PROXY_SSL)
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, ATTR_DATA, PLATFORM_SCHEMA,
BaseNotificationService)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['jsonrpc-async==0.6']
_LOGGER = logging.getLogger(__name__)
DEFAULT_PORT = 8080
DEFAULT_PROXY_SSL = False
DEFAULT_TIMEOUT = 5
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PROXY_SSL, default=DEFAULT_PROXY_SSL): cv.boolean,
vol.Inclusive(CONF_USERNAME, 'auth'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'auth'): cv.string,
})
ATTR_DISPLAYTIME = 'displaytime'
@asyncio.coroutine
def async_get_service(hass, config, discovery_info=None):
"""Return the notify service."""
url = '{}:{}'.format(config.get(CONF_HOST), config.get(CONF_PORT))
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
encryption = config.get(CONF_PROXY_SSL)
if host.startswith('http://') or host.startswith('https://'):
        host = host.split('://', 1)[1]  # Strip the scheme prefix; lstrip() strips characters, not prefixes
_LOGGER.warning(
"Kodi host name should no longer contain http:// See updated "
"definitions here: "
"https://home-assistant.io/components/media_player.kodi/")
http_protocol = 'https' if encryption else 'http'
url = '{}://{}:{}/jsonrpc'.format(http_protocol, host, port)
if username is not None:
auth = aiohttp.BasicAuth(username, password)
else:
auth = None
return KodiNotificationService(hass, url, auth)
class KodiNotificationService(BaseNotificationService):
"""Implement the notification service for Kodi."""
def __init__(self, hass, url, auth=None):
"""Initialize the service."""
import jsonrpc_async
self._url = url
kwargs = {
'timeout': DEFAULT_TIMEOUT,
'session': async_get_clientsession(hass),
}
if auth is not None:
kwargs['auth'] = auth
self._server = jsonrpc_async.Server(self._url, **kwargs)
@asyncio.coroutine
def async_send_message(self, message="", **kwargs):
"""Send a message to Kodi."""
import jsonrpc_async
try:
data = kwargs.get(ATTR_DATA) or {}
displaytime = data.get(ATTR_DISPLAYTIME, 10000)
icon = data.get(ATTR_ICON, "info")
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
yield from self._server.GUI.ShowNotification(
title, message, icon, displaytime)
except jsonrpc_async.TransportError:
_LOGGER.warning("Unable to fetch Kodi data. Is Kodi online?")
|
gerald-yang/ubuntu-iotivity-demo
|
refs/heads/master
|
grovepi/pygrovepi/grove_firmware_version_check.py
|
7
|
#!/usr/bin/env python
#
# GrovePi Example for checking the firmware for the GrovePi
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
#
# NOTE: If you get a version of 255.255.255, then try running the script again; if the issue still persists, you are using an old, deprecated firmware
import grovepi
try:
print ("GrovePi has firmware version:", grovepi.version())
except KeyboardInterrupt:
print ("KeyboardInterrupt")
except IOError:
print ("Error")
|
andresriancho/PyGithub
|
refs/heads/master
|
github/tests/Logging_.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright 2012 Vincent Jacques
# vincent@vincent-jacques.net
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import logging
import github
import Framework
class Logging(Framework.BasicTestCase):
class MockHandler:
def __init__(self):
self.level = logging.DEBUG
self.handled = None
def handle(self, record):
self.handled = record.getMessage()
def setUp(self):
Framework.BasicTestCase.setUp(self)
logger = logging.getLogger("github")
logger.setLevel(logging.DEBUG)
self.__handler = self.MockHandler()
logger.addHandler(self.__handler)
def testLoggingWithBasicAuthentication(self):
self.assertEqual(github.Github(self.login, self.password).get_user().name, "Vincent Jacques")
self.assertEqual(self.__handler.handled, u'GET https://api.github.com/user {\'Authorization\': \'Basic (login and password removed)\'} None ==> 200 {\'status\': \'200 OK\', \'content-length\': \'806\', \'x-github-media-type\': \'github.beta; format=json\', \'x-content-type-options\': \'nosniff\', \'vary\': \'Accept, Authorization, Cookie\', \'x-ratelimit-remaining\': \'4993\', \'server\': \'nginx\', \'last-modified\': \'Fri, 14 Sep 2012 18:47:46 GMT\', \'connection\': \'keep-alive\', \'x-ratelimit-limit\': \'5000\', \'etag\': \'"434dfe5d3f50558fe3cea087cb95c401"\', \'cache-control\': \'private, s-maxage=60, max-age=60\', \'date\': \'Mon, 17 Sep 2012 17:12:32 GMT\', \'content-type\': \'application/json; charset=utf-8\'} {"owned_private_repos":3,"disk_usage":18612,"following":28,"type":"User","public_repos":13,"location":"Paris, France","company":"Criteo","avatar_url":"https://secure.gravatar.com/avatar/b68de5ae38616c296fa345d2b9df2225?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png","plan":{"space":614400,"private_repos":5,"name":"micro","collaborators":1},"blog":"http://vincent-jacques.net","login":"jacquev6","public_gists":3,"html_url":"https://github.com/jacquev6","hireable":false,"created_at":"2010-07-09T06:10:06Z","private_gists":5,"followers":13,"name":"Vincent Jacques","email":"vincent@vincent-jacques.net","bio":"","total_private_repos":3,"collaborators":0,"gravatar_id":"b68de5ae38616c296fa345d2b9df2225","id":327146,"url":"https://api.github.com/users/jacquev6"}')
def testLoggingWithOAuthAuthentication(self):
self.assertEqual(github.Github(self.oauth_token).get_user().name, "Vincent Jacques")
self.assertEqual(self.__handler.handled, u'GET https://api.github.com/user {\'Authorization\': \'token (oauth token removed)\'} None ==> 200 {\'status\': \'200 OK\', \'x-ratelimit-remaining\': \'4993\', \'x-github-media-type\': \'github.beta; format=json\', \'x-content-type-options\': \'nosniff\', \'vary\': \'Accept, Authorization, Cookie\', \'content-length\': \'628\', \'server\': \'nginx\', \'last-modified\': \'Tue, 25 Sep 2012 07:42:42 GMT\', \'connection\': \'keep-alive\', \'x-ratelimit-limit\': \'5000\', \'etag\': \'"c23ad6b5815fc3d6ec6341c4a47afe85"\', \'cache-control\': \'private, max-age=60, s-maxage=60\', \'date\': \'Tue, 25 Sep 2012 20:36:54 GMT\', \'x-oauth-scopes\': \'\', \'content-type\': \'application/json; charset=utf-8\', \'x-accepted-oauth-scopes\': \'user\'} {"type":"User","bio":"","html_url":"https://github.com/jacquev6","login":"jacquev6","followers":14,"company":"Criteo","blog":"http://vincent-jacques.net","public_repos":13,"created_at":"2010-07-09T06:10:06Z","avatar_url":"https://secure.gravatar.com/avatar/b68de5ae38616c296fa345d2b9df2225?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png","email":"vincent@vincent-jacques.net","following":29,"name":"Vincent Jacques","gravatar_id":"b68de5ae38616c296fa345d2b9df2225","hireable":false,"id":327146,"public_gists":3,"location":"Paris, France","url":"https://api.github.com/users/jacquev6"}')
def testLoggingWithoutAuthentication(self):
self.assertEqual(github.Github().get_user("jacquev6").name, "Vincent Jacques")
self.assertEqual(self.__handler.handled, u'GET https://api.github.com/users/jacquev6 {} None ==> 200 {\'status\': \'200 OK\', \'content-length\': \'628\', \'x-github-media-type\': \'github.beta; format=json\', \'x-content-type-options\': \'nosniff\', \'vary\': \'Accept\', \'x-ratelimit-remaining\': \'4989\', \'server\': \'nginx\', \'last-modified\': \'Tue, 25 Sep 2012 07:42:42 GMT\', \'connection\': \'keep-alive\', \'x-ratelimit-limit\': \'5000\', \'etag\': \'"9bd085221a16b6d2ea95e72634c3c1ac"\', \'cache-control\': \'public, max-age=60, s-maxage=60\', \'date\': \'Tue, 25 Sep 2012 20:38:56 GMT\', \'content-type\': \'application/json; charset=utf-8\'} {"type":"User","html_url":"https://github.com/jacquev6","login":"jacquev6","followers":14,"company":"Criteo","created_at":"2010-07-09T06:10:06Z","email":"vincent@vincent-jacques.net","hireable":false,"avatar_url":"https://secure.gravatar.com/avatar/b68de5ae38616c296fa345d2b9df2225?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png","public_gists":3,"bio":"","following":29,"name":"Vincent Jacques","blog":"http://vincent-jacques.net","gravatar_id":"b68de5ae38616c296fa345d2b9df2225","id":327146,"public_repos":13,"location":"Paris, France","url":"https://api.github.com/users/jacquev6"}')
def testLoggingWithBaseUrl(self):
# ReplayData forged, not recorded
self.assertEqual(github.Github(base_url="http://my.enterprise.com/my/prefix").get_user("jacquev6").name, "Vincent Jacques")
self.assertEqual(self.__handler.handled, u'GET http://my.enterprise.com/my/prefix/users/jacquev6 {} None ==> 200 {\'status\': \'200 OK\', \'content-length\': \'628\', \'x-github-media-type\': \'github.beta; format=json\', \'x-content-type-options\': \'nosniff\', \'vary\': \'Accept\', \'x-ratelimit-remaining\': \'4989\', \'server\': \'nginx\', \'last-modified\': \'Tue, 25 Sep 2012 07:42:42 GMT\', \'connection\': \'keep-alive\', \'x-ratelimit-limit\': \'5000\', \'etag\': \'"9bd085221a16b6d2ea95e72634c3c1ac"\', \'cache-control\': \'public, max-age=60, s-maxage=60\', \'date\': \'Tue, 25 Sep 2012 20:38:56 GMT\', \'content-type\': \'application/json; charset=utf-8\'} {"type":"User","html_url":"https://github.com/jacquev6","login":"jacquev6","followers":14,"company":"Criteo","created_at":"2010-07-09T06:10:06Z","email":"vincent@vincent-jacques.net","hireable":false,"avatar_url":"https://secure.gravatar.com/avatar/b68de5ae38616c296fa345d2b9df2225?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png","public_gists":3,"bio":"","following":29,"name":"Vincent Jacques","blog":"http://vincent-jacques.net","gravatar_id":"b68de5ae38616c296fa345d2b9df2225","id":327146,"public_repos":13,"location":"Paris, France","url":"https://api.github.com/users/jacquev6"}')
|
wd5/jangr
|
refs/heads/master
|
_django/core/management/base.py
|
248
|
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from optparse import make_option, OptionParser
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.utils.encoding import smart_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
instead print an error message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
            help='Verbosity level; 0=minimal output, 1=normal output, 2=all output, 3=very verbose output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Print traceback on exception'),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
self.execute(*args, **options.__dict__)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``). If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
# Switch to English, because django-admin.py creates database content
# like permissions, and those shouldn't contain any translations.
# But only do this if we can assume we have a working settings file,
# because django.utils.translation requires settings.
if self.can_import_settings:
try:
from django.utils import translation
translation.activate('en-us')
except ImportError, e:
# If settings should be available, but aren't,
# raise the error and quit.
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = options.get('stdout', sys.stdout)
self.stderr = options.get('stderr', sys.stderr)
if self.requires_model_validation:
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError, e:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
def validate(self, app=None, display_num_errors=False):
"""
Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.
"""
from django.core.management.validation import get_validation_errors
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
s = StringIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError("One or more models did not validate:\n%s" % error_text)
if display_num_errors:
self.stdout.write("%s error%s found\n" % (num_errors, num_errors != 1 and 's' or ''))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError()
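# A minimal sketch (not part of Django) of the subclassing pattern described in
# the BaseCommand docstring above: put the command's logic in handle() and
# return any output as a string, which execute() writes to stdout. The command
# name and behaviour here are illustrative only.
class ExampleGreetCommand(BaseCommand):
    help = 'Print a greeting for each name given on the command line.'
    args = '<name name ...>'
    def handle(self, *args, **options):
        if not args:
            raise CommandError('Enter at least one name.')
        return '\n'.join('Hello, %s!' % name for name in args)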
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application
names as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app()``, which will be called once for each application.
"""
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, ImportError), e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app(self, app, **options):
"""
Perform the command's actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.
"""
raise NotImplementedError()
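# A minimal sketch (not part of Django): an AppCommand subclass only implements
# handle_app(); argument parsing and application lookup are inherited from
# handle() above. The command name and behaviour are illustrative only.
class ExampleListModelsCommand(AppCommand):
    help = 'List the model classes defined in each given application.'
    def handle_app(self, app, **options):
        from django.db import models
        return '\n'.join(model.__name__ for model in models.get_models(app))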
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError()
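# A minimal sketch (not part of Django): a LabelCommand subclass implements
# handle_label(), which is called once per arbitrary command-line argument.
# Illustrative only.
class ExampleEchoCommand(LabelCommand):
    help = 'Echo back each label given on the command line.'
    label = 'value'
    def handle_label(self, label, **options):
        return 'Received %s: %s' % (self.label, label)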
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError()
def copy_helper(style, app_or_project, name, directory, other_name=''):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
"""
# style -- A color style object (see django.core.management.color).
# app_or_project -- The string 'app' or 'project'.
# name -- The name of the application or project.
# directory -- The directory to which the layout template should be copied.
# other_name -- When copying an application layout, this should be the name
# of the project.
import re
import shutil
other = {'project': 'app', 'app': 'project'}[app_or_project]
if not re.search(r'^[_a-zA-Z]\w*$', name): # If it's not a valid directory name.
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message))
top_dir = os.path.join(directory, name)
try:
os.mkdir(top_dir)
except OSError, e:
raise CommandError(e)
# Determine where the app or project templates are. Use
# django.__path__[0] because we don't know into which directory
# django has been installed.
template_dir = os.path.join(django.__path__[0], 'conf', '%s_template' % app_or_project)
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir)+1:].replace('%s_name' % app_or_project, name)
if relative_dir:
os.mkdir(os.path.join(top_dir, relative_dir))
for subdir in subdirs[:]:
if subdir.startswith('.'):
subdirs.remove(subdir)
for f in files:
if not f.endswith('.py'):
# Ignore .pyc, .pyo, .py.class etc, as they cause various
# breakages.
continue
path_old = os.path.join(d, f)
path_new = os.path.join(top_dir, relative_dir, f.replace('%s_name' % app_or_project, name))
fp_old = open(path_old, 'r')
fp_new = open(path_new, 'w')
fp_new.write(fp_old.read().replace('{{ %s_name }}' % app_or_project, name).replace('{{ %s_name }}' % other, other_name))
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
|
benjaminrigaud/django
|
refs/heads/master
|
django/db/models/sql/query.py
|
3
|
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
from collections import Mapping, OrderedDict
import copy
import warnings
from django.core.exceptions import FieldError
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models.constants import LOOKUP_SEP
from django.db.models.aggregates import refs_aggregate
from django.db.models.expressions import ExpressionNode
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query_utils import Q
from django.db.models.related import PathInfo
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.constants import (QUERY_TERMS, ORDER_DIR, SINGLE,
ORDER_PATTERN, JoinInfo, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin, Col
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
ExtraWhere, AND, OR, EmptyWhere)
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
class RawQuery(object):
"""
A single raw SQL query
"""
def __init__(self, sql, using, params=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.aggregate_select = {}
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.column_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<RawQuery: %s>" % self
def __str__(self):
_type = dict if isinstance(self.params, Mapping) else tuple
return self.sql % _type(self.params)
def _execute_query(self):
self.cursor = connections[self.using].cursor()
self.cursor.execute(self.sql, self.params)
class Query(object):
"""
A single SQL query.
"""
# SQL join types. These are part of the class because their string forms
# vary from database to database and can be customised by a subclass.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
query_terms = QUERY_TERMS
aggregates_module = base_aggregates_module
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# type they are. The key is the alias of the joined table (possibly
# the table name) and the value is JoinInfo from constants.py.
self.alias_map = {}
self.table_map = {} # Maps table names to list of aliases.
self.join_map = {}
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.included_inherited_models = {}
# SQL-related attributes
# Select and related select clauses as SelectInfo instances.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), annotate(),
# subqueries...)
self.select = []
# The related_select_cols is used for columns needed for
# select_related - this is populated in the compile stage.
self.related_select_cols = []
self.tables = [] # Aliases in the order they are created.
self.where = where()
self.where_class = where
self.group_by = None
self.having = where()
self.order_by = []
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = []
self.select_for_update = False
self.select_for_update_nowait = False
self.select_related = False
# SQL aggregate-related attributes
# The _aggregates will be an OrderedDict when used. Due to the cost
# of creating OrderedDict this attribute is created lazily (in
# self.aggregates property).
self._aggregates = None # Maps alias -> SQL aggregate function
self.aggregate_select_mask = None
self._aggregate_select_cache = None
# Arbitrary maximum limit for select_related. Prevents infinite
# recursion. Can be changed by the depth parameter to select_related().
self.max_depth = 5
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
# The _extra attribute is an OrderedDict, lazily created similarly to
# .aggregates
self._extra = None # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (set(), True)
@property
def extra(self):
if self._extra is None:
self._extra = OrderedDict()
return self._extra
@property
def aggregates(self):
if self._aggregates is None:
self._aggregates = OrderedDict()
return self._aggregates
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Returns the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def prepare(self):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.aggregate_select.items():
connection.ops.check_aggregate_support(aggregate)
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.model = self.model
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.table_map = self.table_map.copy()
obj.join_map = self.join_map.copy()
obj.default_cols = self.default_cols
obj.default_ordering = self.default_ordering
obj.standard_ordering = self.standard_ordering
obj.included_inherited_models = self.included_inherited_models.copy()
obj.select = self.select[:]
obj.related_select_cols = []
obj.tables = self.tables[:]
obj.where = self.where.clone()
obj.where_class = self.where_class
if self.group_by is None:
obj.group_by = None
else:
obj.group_by = self.group_by[:]
obj.having = self.having.clone()
obj.order_by = self.order_by[:]
obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
obj.distinct = self.distinct
obj.distinct_fields = self.distinct_fields[:]
obj.select_for_update = self.select_for_update
obj.select_for_update_nowait = self.select_for_update_nowait
obj.select_related = self.select_related
obj.related_select_cols = []
obj._aggregates = self._aggregates.copy() if self._aggregates is not None else None
if self.aggregate_select_mask is None:
obj.aggregate_select_mask = None
else:
obj.aggregate_select_mask = self.aggregate_select_mask.copy()
# _aggregate_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both aggregates and
# _aggregate_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._aggregate_select_cache = None
obj.max_depth = self.max_depth
obj._extra = self._extra.copy() if self._extra is not None else None
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
obj.extra_tables = self.extra_tables
obj.extra_order_by = self.extra_order_by
obj.deferred_loading = copy.copy(self.deferred_loading[0]), self.deferred_loading[1]
if self.filter_is_sticky and self.used_aliases:
obj.used_aliases = self.used_aliases.copy()
else:
obj.used_aliases = set()
obj.filter_is_sticky = False
if 'alias_prefix' in self.__dict__:
obj.alias_prefix = self.alias_prefix
if 'subq_aliases' in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.__dict__.update(kwargs)
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def resolve_aggregate(self, value, aggregate, connection):
"""Resolve the value of aggregates returned by the database to
consistent (and reasonable) types.
This is required because of the predisposition of certain backends
to return Decimal and long types when they are not needed.
"""
if value is None:
if aggregate.is_ordinal:
return 0
# Return None as-is
return value
elif aggregate.is_ordinal:
# Any ordinal aggregate (e.g., count) returns an int
return int(value)
elif aggregate.is_computed:
# Any computed aggregate (e.g., avg) returns a float
return float(value)
else:
# Return value depends on the type of the field being processed.
backend_converters = connection.ops.get_db_converters(aggregate.field.get_internal_type())
field_converters = aggregate.field.get_db_converters(connection)
for converter in backend_converters:
value = converter(value, aggregate.field)
for converter in field_converters:
value = converter(value, connection)
return value
def get_aggregation(self, using, force_subq=False):
"""
Returns the dictionary with the values of the existing aggregations.
"""
if not self.aggregate_select:
return {}
# If there is a group by clause, aggregating does not add useful
# information but retrieves only the first row. Aggregate
# over the subquery instead.
if self.group_by is not None or force_subq:
from django.db.models.sql.subqueries import AggregateQuery
query = AggregateQuery(self.model)
obj = self.clone()
if not force_subq:
# In forced subq case the ordering and limits will likely
# affect the results.
obj.clear_ordering(True)
obj.clear_limits()
obj.select_for_update = False
obj.select_related = False
obj.related_select_cols = []
relabels = dict((t, 'subquery') for t in self.tables)
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
for alias, aggregate in self.aggregate_select.items():
if aggregate.is_summary:
query.aggregates[alias] = aggregate.relabeled_clone(relabels)
del obj.aggregate_select[alias]
try:
query.add_subquery(obj, using)
except EmptyResultSet:
return dict(
(alias, None)
for alias in query.aggregate_select
)
else:
query = self
self.select = []
self.default_cols = False
self._extra = {}
self.remove_inherited_models()
query.clear_ordering(True)
query.clear_limits()
query.select_for_update = False
query.select_related = False
query.related_select_cols = []
result = query.get_compiler(using).execute_sql(SINGLE)
if result is None:
result = [None for q in query.aggregate_select.items()]
return dict(
(alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
for (alias, aggregate), val
in zip(query.aggregate_select.items(), result)
)
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
if len(self.select) > 1 or self.aggregate_select or (self.distinct and self.distinct_fields):
# If a select clause exists, then the query has already started to
# specify the columns that are to be returned.
# In this case, we need to use a subquery to evaluate the count.
from django.db.models.sql.subqueries import AggregateQuery
subquery = obj
subquery.clear_ordering(True)
subquery.clear_limits()
obj = AggregateQuery(obj.model)
try:
obj.add_subquery(subquery, using=using)
except EmptyResultSet:
                # add_subquery evaluates the query; if it raises EmptyResultSet
                # then there can be no results, and therefore the count is 0.
return 0
obj.add_count_column()
number = obj.get_aggregation(using=using)[None]
# Apply offset and limit constraints manually, since using LIMIT/OFFSET
# in SQL (in variants that provide them) doesn't change the COUNT
# output.
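        # For example (illustrative): with 10 matching rows and a slice of
        # [3:7] (low_mark=3, high_mark=7), COUNT() returns 10, which is first
        # reduced to max(0, 10 - 3) = 7 and then capped at 7 - 3 = 4.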
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
def has_filters(self):
return self.where or self.having
def has_results(self, using):
q = self.clone()
if not q.distinct:
q.clear_select_clause()
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def combine(self, rhs, connector):
"""
        Merge the 'rhs' query into the current one (with any 'rhs' effects
        being applied *after*, that is, "to the right of", anything in the
        current query). 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
self.remove_inherited_models()
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.tables)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == self.INNER)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
for alias in rhs.tables[1:]:
table, _, join_type, lhs, join_cols, nullable, join_field = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
lhs = change_map.get(lhs, lhs)
new_alias = self.join(
(lhs, table, join_cols), reuse=reuse,
nullable=nullable, join_field=join_field)
if join_type == self.INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information about
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
if rhs.where:
w = rhs.where.clone()
w.relabel_aliases(change_map)
if not self.where:
# Since 'self' matches everything, add an explicit "include
# everything" where-constraint so that connections between the
# where clauses won't exclude valid results.
self.where.add(EverythingNode(), AND)
elif self.where:
# rhs has an empty where clause.
w = self.where_class()
w.add(EverythingNode(), AND)
else:
w = self.where_class()
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
self.select = []
for col, field in rhs.select:
if isinstance(col, (list, tuple)):
new_col = change_map.get(col[0], col[0]), col[1]
self.select.append(SelectInfo(new_col, field))
else:
new_col = col.relabeled_clone(change_map)
self.select.append(SelectInfo(new_col, field))
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self._extra and rhs._extra:
raise ValueError("When merging querysets using 'or', you "
"cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
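# Usage sketch (hedged; Book is a hypothetical model): combine() is the
# machinery behind combining querysets with | and &, roughly:
#
#     qs = Book.objects.filter(title='A') | Book.objects.filter(pages__gt=10)
#     # which internally clones the left-hand query and calls
#     # combined.query.combine(rhs.query, OR)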
def deferred_to_data(self, target, callback):
"""
Converts the self.deferred_loading data structure to an alternate data
structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair need to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field_by_name(name)[0]
if is_reverse_o2o(source):
cur_model = source.model
else:
cur_model = source.rel.to
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field, model, _, _ = opts.get_field_by_name(parts[-1])
if model is None:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in six.iteritems(seen):
for field, m in model._meta.get_fields_with_model():
if field in values:
continue
add_to_dict(workset, m or model, field)
for model, values in six.iteritems(must_include):
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in six.iteritems(workset):
callback(target, model, values)
else:
for model, values in six.iteritems(must_include):
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in six.iteritems(seen):
callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
"""
Callback used by deferred_to_columns(). The "target" parameter should
be a set instance.
"""
table = model._meta.db_table
if table not in target:
target[table] = set()
for field in fields:
target[table].add(field.column)
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
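# Illustrative behaviour of the aliasing rule above (table names are
# hypothetical): the first reference to "myapp_book" uses the table name
# itself as the alias; a second, forced join of the same table receives a
# generated alias of the form alias_prefix + number, e.g. "T4", where the
# number depends on how many aliases already exist in alias_map.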
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
""" Decreases the reference count for this alias. """
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
Recursively promotes the join type of the given aliases and their
children to an outer join. A join is only promoted if it is nullable
or the parent join is an outer join.
The children promotion is done to avoid join chains containing
a LOUTER b INNER c. So, if we currently have a INNER b INNER c and
a->b is promoted, then we must also promote b->c automatically, or
otherwise the promotion of a->b doesn't actually change anything in
the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_cols[0][1] is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].lhs_alias
parent_louter = (
parent_alias
and self.alias_map[parent_alias].join_type == self.LOUTER)
already_louter = self.alias_map[alias].join_type == self.LOUTER
if ((self.alias_map[alias].nullable or parent_louter) and
not already_louter):
data = self.alias_map[alias]._replace(join_type=self.LOUTER)
self.alias_map[alias] = data
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map.keys()
if (self.alias_map[join].lhs_alias == alias
and join not in aliases))
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting the b->c join in the chain a LOUTER b LOUTER c, then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == self.LOUTER:
self.alias_map[alias] = self.alias_map[alias]._replace(join_type=self.INNER)
parent_alias = self.alias_map[alias].lhs_alias
if self.alias_map[parent_alias].join_type == self.INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
This method will reset reference counts for aliases so that they match
the values passed in 'to_counts'.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Changes the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map.keys()).intersection(set(change_map.values())) == set()
def relabel_column(col):
if isinstance(col, (list, tuple)):
old_alias = col[0]
return (change_map.get(old_alias, old_alias), col[1])
else:
return col.relabeled_clone(change_map)
# 1. Update references in "select" (normal columns plus aliases),
# "group by", "where" and "having".
self.where.relabel_aliases(change_map)
self.having.relabel_aliases(change_map)
if self.group_by:
self.group_by = [relabel_column(col) for col in self.group_by]
self.select = [SelectInfo(relabel_column(s.col), s.field)
for s in self.select]
if self._aggregates:
self._aggregates = OrderedDict(
(key, relabel_column(col)) for key, col in self._aggregates.items())
# 2. Rename the alias in the internal table/alias datastructures.
for ident, aliases in self.join_map.items():
del self.join_map[ident]
aliases = tuple(change_map.get(a, a) for a in aliases)
ident = (change_map.get(ident[0], ident[0]),) + ident[1:]
self.join_map[ident] = aliases
for old_alias, new_alias in six.iteritems(change_map):
alias_data = self.alias_map[old_alias]
alias_data = alias_data._replace(rhs_alias=new_alias)
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
self.alias_map[new_alias] = alias_data
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
for pos, alias in enumerate(self.tables):
if alias == old_alias:
self.tables[pos] = new_alias
break
for key, alias in self.included_inherited_models.items():
if alias in change_map:
self.included_inherited_models[key] = change_map[alias]
# 3. Update any joins that refer to the old alias.
for alias, data in six.iteritems(self.alias_map):
lhs = data.lhs_alias
if lhs in change_map:
data = data._replace(lhs_alias=change_map[lhs])
self.alias_map[alias] = data
def bump_prefix(self, outer_query):
"""
Changes the alias prefix to the next letter in the alphabet in such a
way that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
"""
if self.alias_prefix != outer_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
self.alias_prefix = chr(ord(self.alias_prefix) + 1)
while self.alias_prefix in self.subq_aliases:
self.alias_prefix = chr(ord(self.alias_prefix) + 1)
assert self.alias_prefix < 'Z'
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
change_map = OrderedDict()
for pos, alias in enumerate(self.tables):
new_alias = '%s%d' % (self.alias_prefix, pos)
change_map[alias] = new_alias
self.tables[pos] = new_alias
self.change_aliases(change_map)
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
count.
"""
if self.tables:
alias = self.tables[0]
self.ref_alias(alias)
else:
alias = self.join((None, self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count. Note that after execution, the reference counts are zeroed, so
tables added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, connection, reuse=None, nullable=False, join_field=None):
"""
Returns an alias for the join in 'connection', either reusing an
existing alias for that join or creating a new one. 'connection' is a
tuple (lhs, table, join_cols) where 'lhs' is either an existing
table alias or a table name. 'join_cols' is a tuple of tuples containing
columns to join on ((l_id1, r_id1), (l_id2, r_id2)). The join corresponds
to the SQL equivalent of::
lhs.l_id1 = table.r_id1 AND lhs.l_id2 = table.r_id2
The 'reuse' parameter can be either None which means all joins
(matching the connection) are reusable, or it can be a set containing
the aliases that can be reused.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure we do not generate chains like t1 LOUTER t2 INNER t3. All new
joins are created as LOUTER if nullable is True.
If 'nullable' is True, the join can potentially involve NULL values and
is a candidate for promotion (to "left outer") when combining querysets.
The 'join_field' is the field we are joining along (if any).
"""
lhs, table, join_cols = connection
assert lhs is None or join_field is not None
existing = self.join_map.get(connection, ())
if reuse is None:
reuse = existing
else:
reuse = [a for a in existing if a in reuse]
for alias in reuse:
if join_field and self.alias_map[alias].join_field != join_field:
# The join_map doesn't contain join_field (mainly because
# fields in Query structs are problematic in pickling), so
# check that the existing join was created using the same
# join_field as the join currently being constructed.
continue
self.ref_alias(alias)
return alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(table, create=True)
if not lhs:
# Not all tables need to be joined to anything. No join type
# means the later columns are ignored.
join_type = None
elif self.alias_map[lhs].join_type == self.LOUTER or nullable:
join_type = self.LOUTER
else:
join_type = self.INNER
join = JoinInfo(table, alias, join_type, lhs, join_cols or ((None, None),), nullable,
join_field)
self.alias_map[alias] = join
if connection in self.join_map:
self.join_map[connection] += (alias,)
else:
self.join_map[connection] = (alias,)
return alias
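# A minimal sketch of a 'connection' argument (column and table names are
# hypothetical): joining book.author_id to author.id would be expressed as
#
#     connection = ('myapp_book', 'myapp_author', (('author_id', 'id'),))
#     alias = query.join(connection, nullable=False, join_field=book_author_field)
#
# With reuse left as None, a second identical call with the same join_field
# simply bumps the refcount of the existing alias and returns it.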
def setup_inherited_models(self):
"""
If the model that is the basis for this QuerySet inherits other models,
we need to ensure that those other models have their tables included in
the query.
We do this as a separate step so that subclasses know which
tables are going to be active in the query, without needing to compute
all the select columns (this method is called from pre_sql_setup(),
whereas column determination is a later part, and side-effect, of
as_sql()).
"""
opts = self.get_meta()
root_alias = self.tables[0]
seen = {None: root_alias}
for field, model in opts.get_fields_with_model():
if model not in seen:
self.join_parent_model(opts, model, root_alias, seen)
self.included_inherited_models = seen
def join_parent_model(self, opts, model, alias, seen):
"""
Makes sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None, this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if chain is None:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
return seen[int_model]
# Proxy models have elements in the base chain
# with no parents; assign the new options
# object and skip to the next base in that
# case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
_, _, _, joins, _ = self.setup_joins(
[link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = joins[-1]
return alias or seen[None]
def remove_inherited_models(self):
"""
Undoes the effects of setup_inherited_models(). Should be called
whenever select columns (self.select) are set explicitly.
"""
for key, alias in self.included_inherited_models.items():
if key:
self.unref_alias(alias)
self.included_inherited_models = {}
def add_aggregate(self, aggregate, model, alias, is_summary):
"""
Adds a single aggregate expression to the Query
"""
opts = model._meta
field_list = aggregate.lookup.split(LOOKUP_SEP)
if len(field_list) == 1 and self._aggregates and aggregate.lookup in self.aggregates:
# Aggregate is over an annotation
field_name = field_list[0]
col = field_name
source = self.aggregates[field_name]
if not is_summary:
raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
aggregate.name, field_name, field_name))
elif ((len(field_list) > 1) or
(field_list[0] not in [i.name for i in opts.fields]) or
self.group_by is None or
not is_summary):
# If:
# - the field descriptor has more than one part (foo__bar), or
# - the field descriptor is referencing an m2m/m2o field, or
# - this is a reference to a model field (possibly inherited), or
# - this is an annotation over a model field
# then we need to explore the joins that are required.
# Join promotion note - we must not remove any rows here, so use
# outer join if there isn't any existing join.
_, sources, opts, join_list, path = self.setup_joins(
field_list, opts, self.get_initial_alias())
# Process the join chain to see if it can be trimmed
targets, _, join_list = self.trim_joins(sources, join_list, path)
col = targets[0].column
source = sources[0]
col = (join_list[-1], col)
else:
# The simplest cases. No joins required -
# just reference the provided column alias.
field_name = field_list[0]
source = opts.get_field(field_name)
col = field_name
# We want to have the alias in the SELECT clause even if a mask is set.
self.append_aggregate_mask([alias])
# Add the aggregate to the query
aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
def prepare_lookup_value(self, value, lookups, can_reuse):
# Default lookup if none given is exact.
if len(lookups) == 0:
lookups = ['exact']
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookups[-1] not in ('exact', 'iexact'):
raise ValueError("Cannot use None as a query value")
lookups[-1] = 'isnull'
value = True
elif callable(value):
warnings.warn(
"Passing callable arguments to queryset is deprecated.",
RemovedInDjango19Warning, stacklevel=2)
value = value()
elif isinstance(value, ExpressionNode):
# If value is a query expression, evaluate it
value = SQLEvaluator(value, self, reuse=can_reuse)
if hasattr(value, 'query') and hasattr(value.query, 'bump_prefix'):
value = value._clone()
value.query.bump_prefix(self)
if hasattr(value, 'bump_prefix'):
value = value.clone()
value.bump_prefix(self)
# For Oracle '' is equivalent to null. The check needs to be done
# at this stage because join promotion can't be done at compiler
# stage. Using DEFAULT_DB_ALIAS isn't nice, but it is the best we
# can do here. Similar thing is done in is_nullable(), too.
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
lookups[-1] == 'exact' and value == ''):
value = True
lookups[-1] = 'isnull'
return value, lookups
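# Behaviour sketch (hedged): a filter such as .filter(author=None) reaches
# this method with lookups == ['exact'] and value None; it leaves with
# lookups == ['isnull'] and value True, which later renders as
# "author_id IS NULL" instead of a comparison against NULL.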
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g. 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self._aggregates:
aggregate, aggregate_lookups = refs_aggregate(lookup_splitted, self.aggregates)
if aggregate:
return aggregate_lookups, (), aggregate
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) == 0:
lookup_parts = ['exact']
elif len(lookup_parts) > 1:
if not field_parts:
raise FieldError(
'Invalid lookup "%s" for model %s.' %
(lookup, self.get_meta().model.__name__))
return lookup_parts, field_parts, False
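# Example outcome (illustrative field names): for 'author__name__icontains'
# on a model with an 'author' relation this returns roughly
# (['icontains'], ['author', 'name'], False) - the trailing parts that are
# not field names become the lookup parts, the rest the field path.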
def check_query_object_type(self, value, opts):
"""
Checks whether the object passed while querying is of the correct type.
If not, it raises a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not (value._meta.concrete_model == opts.concrete_model
or opts.concrete_model in value._meta.get_parent_list()
or value._meta.concrete_model in opts.get_parent_list()):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.' %
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""
Checks the type of object passed to query relations.
"""
if field.rel:
# QuerySets implement is_compatible_query_object_type() to
# determine compatibility with the given field.
if hasattr(value, 'is_compatible_query_object_type'):
if not value.is_compatible_query_object_type(opts):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
(value.model._meta.model_name, opts.object_name)
)
elif hasattr(value, '_meta'):
self.check_query_object_type(value, opts)
elif hasattr(value, '__iter__'):
for v in value:
self.check_query_object_type(v, opts)
def build_lookup(self, lookups, lhs, rhs):
lookups = lookups[:]
bilaterals = []
while lookups:
lookup = lookups[0]
if len(lookups) == 1:
final_lookup = lhs.get_lookup(lookup)
if final_lookup:
return final_lookup(lhs, rhs, bilaterals)
# We didn't find a lookup, so we are going to try get_transform
# + get_lookup('exact').
lookups.append('exact')
next = lhs.get_transform(lookup)
if next:
lhs = next(lhs, lookups)
if getattr(next, 'bilateral', False):
bilaterals.append((next, lookups))
else:
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted." %
(lookup, lhs.output_field.__class__.__name__))
lookups = lookups[1:]
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, connector=AND):
"""
Builds a WhereNode for a single filter clause, but doesn't add it
to this Query. Query.add_q() will then add this filter to the where
or having Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on the first negation, but current_negated is
flipped for each negation.
Note that add_filter will not do any negating itself; that is done
higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_aggregate = self.solve_lookup_type(arg)
# Work out the lookup type and remove it from the end of 'parts',
# if necessary.
value, lookups = self.prepare_lookup_value(value, lookups, can_reuse)
used_joins = getattr(value, '_used_joins', [])
clause = self.where_class()
if reffed_aggregate:
condition = self.build_lookup(lookups, reffed_aggregate, value)
if not condition:
# Backwards compat for custom lookups
assert len(lookups) == 1
condition = (reffed_aggregate, lookups[0], value)
clause.add(condition, AND)
return clause, []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated
try:
field, sources, opts, join_list, path = self.setup_joins(
parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many)
self.check_related_objects(field, value, opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_list
except MultiJoin as e:
return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse, e.names_with_path)
if can_reuse is not None:
can_reuse.update(join_list)
used_joins = set(used_joins).union(set(join_list))
# Process the join list to see if we can remove any non-needed joins from
# the far end (fewer tables in a query is better).
targets, alias, join_list = self.trim_joins(sources, join_list, path)
if hasattr(field, 'get_lookup_constraint'):
# For now foreign keys get special treatment. This should be
# refactored when composite fields lands.
condition = field.get_lookup_constraint(self.where_class, alias, targets, sources,
lookups, value)
lookup_type = lookups[-1]
else:
assert(len(targets) == 1)
col = Col(alias, targets[0], field)
condition = self.build_lookup(lookups, col, value)
if not condition:
# Backwards compat for custom lookups
if lookups[0] not in self.query_terms:
raise FieldError(
"Join on field '%s' not permitted. Did you "
"misspell '%s' for the lookup type?" %
(col.output_field.name, lookups[0]))
if len(lookups) > 1:
raise FieldError("Nested lookup '%s' not supported." %
LOOKUP_SEP.join(lookups))
condition = (Constraint(alias, targets[0].column, field), lookups[0], value)
lookup_type = lookups[-1]
else:
lookup_type = condition.lookup_name
clause.add(condition, AND)
require_outer = lookup_type == 'isnull' and value is True and not current_negated
if current_negated and (lookup_type != 'isnull' or value is False):
require_outer = True
if (lookup_type != 'isnull' and (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == self.LOUTER)):
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
lookup_class = targets[0].get_lookup('isnull')
clause.add(lookup_class(Col(alias, targets[0], sources[0]), False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_clause):
self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))
def need_having(self, obj):
"""
Returns whether or not all elements of this q_object need to be put
together in the HAVING clause.
"""
if not self._aggregates:
return False
if not isinstance(obj, Node):
return (refs_aggregate(obj[0].split(LOOKUP_SEP), self.aggregates)[0]
or (hasattr(obj[1], 'contains_aggregate')
and obj[1].contains_aggregate(self.aggregates)))
return any(self.need_having(c) for c in obj.children)
def split_having_parts(self, q_object, negated=False):
"""
Returns a list of q_objects which need to go into the having clause
instead of the where clause. Removes the split-out nodes from the
given q_object. Note that the q_object is altered, so it may need to
be cloned first.
"""
having_parts = []
for c in q_object.children[:]:
# When constructing the having nodes we need to take care to
# preserve the negation status from the upper parts of the tree
if isinstance(c, Node):
# For each negated child, flip the in_negated flag.
in_negated = c.negated ^ negated
if c.connector == OR and self.need_having(c):
# A subtree starting from an OR clause must go into the having
# clause as a whole if any part of that tree references an aggregate.
q_object.children.remove(c)
having_parts.append(c)
c.negated = in_negated
else:
having_parts.extend(
self.split_having_parts(c, in_negated)[1])
elif self.need_having(c):
q_object.children.remove(c)
new_q = self.where_class(children=[c], negated=negated)
having_parts.append(new_q)
return q_object, having_parts
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for
splitting the given q_object into where and having parts and
setting up some internal variables.
"""
if not self.need_having(q_object):
where_part, having_parts = q_object, []
else:
where_part, having_parts = self.split_having_parts(
q_object.clone(), q_object.negated)
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider the case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
# So, demotion is OK.)
existing_inner = set(
(a for a in self.alias_map if self.alias_map[a].join_type == self.INNER))
clause, require_inner = self._add_q(where_part, self.used_aliases)
self.where.add(clause, AND)
for hp in having_parts:
clause, _ = self._add_q(hp, self.used_aliases)
self.having.add(clause, AND)
self.demote_joins(existing_inner)
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False):
"""
Adds a Q-object to the current filter.
"""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector,
negated=q_object.negated)
joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause, needed_inner = self._add_q(
child, used_aliases, branch_negated,
current_negated)
joinpromoter.add_votes(needed_inner)
else:
child_clause, needed_inner = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated, connector=connector)
joinpromoter.add_votes(needed_inner)
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
Walks the names path and turns them into PathInfo tuples. Note that a
single name in 'names' can generate multiple PathInfos (m2m for
example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
Returns a list of PathInfo tuples. In addition, returns the final field
(the last used join field), and target (which is a field guaranteed to
contain the same value as the final field).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
try:
field, model, direct, m2m = opts.get_field_by_name(name)
except FieldDoesNotExist:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
available = opts.get_all_field_names() + list(self.aggregate_select)
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model:
# The field lives on a base class of the current model.
# Skip the chain of proxy models to the concrete proxied model.
proxied_model = opts.concrete_model
for int_model in opts.get_base_chain(model):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.rel.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
cur_names_with_path[1].append(
PathInfo(final_field.model._meta, opts, targets, final_field, False, True)
)
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info()
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
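# Illustrative outcome (model and field names hypothetical): for
# names == ['author', 'name'] on a Book model this returns one PathInfo for
# the Book -> Author join, the Author 'name' field as the final field and
# single target, and an empty remainder. With fail_on_missing=True a typo
# such as ['author', 'nmae'] raises a FieldError listing the available
# choices instead.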
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Returns the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins and the
field path travelled to generate the joins.
The target field is the field containing the concrete value. The final
field can be something different, for example a foreign key pointing to
that value. The final field is needed, for example, in some value
conversions (converting 'obj' in fk__id=obj to a pk value using the
foreign key field).
"""
joins = [alias]
# First, generate the path for the names
path, final_field, targets, rest = self.names_to_path(
names, opts, allow_many, fail_on_missing=True)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for pos, join in enumerate(path):
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = alias, opts.db_table, join.join_field.get_joining_columns()
reuse = can_reuse if join.m2m else None
alias = self.join(
connection, reuse=reuse, nullable=nullable, join_field=join.join_field)
joins.append(alias)
if hasattr(final_field, 'field'):
final_field = final_field.field
return final_field, targets, opts, joins, path
def trim_joins(self, targets, joins, path):
"""
The 'targets' parameter contains the final fields being joined to, 'joins'
is the full list of join aliases. The 'path' contains the PathInfos
used to create the joins.
Returns the final target field and table alias and the new active
joins.
We will always trim any direct join if we have the target column
available already in the previous table. Reverse joins can't be
trimmed as we don't know if there is anything on the other side of
the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
join_targets = set(t.column for t in info.join_field.foreign_related_fields)
cur_targets = set(t.column for t in targets)
if not cur_targets.issubset(join_targets):
break
targets = tuple(r[0] for r in info.join_field.related_fields if r[1].column in cur_targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
As an example we could have original filter ~Q(child__name='foo').
We would get here with filter_expr = child__name, prefix = child and
can_reuse is a set of joins usable for filters in the original query.
We will turn this into equivalent of:
WHERE NOT (pk IN (SELECT parent_id FROM thetable
WHERE name = 'foo' AND parent_id IS NOT NULL))
It might be worth considering using WHERE NOT EXISTS instead, as that
has saner null handling and is easier for the backend's optimizer to
handle.
"""
# Generate the inner query.
query = Query(self.model)
query.add_filter(filter_expr)
query.clear_ordering(True)
# Try to keep the subquery as simple as possible -> trim leading joins
# from the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
query.remove_inherited_models()
# Add an extra check to make sure the selected field will not be null,
# since we are adding an IN <subquery> clause. This prevents the
# database from tripping over IN (..., NULL, ...) selects and returning
# nothing.
alias, col = query.select[0].col
if self.is_nullable(query.select[0].field):
lookup_class = query.select[0].field.get_lookup('isnull')
lookup = lookup_class(Col(alias, query.select[0].field, query.select[0].field), False)
query.where.add(lookup, AND)
if alias in can_reuse:
select_field = query.select[0].field
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup('exact')
lookup = lookup_class(Col(query.select[0].col[0], pk, pk),
Col(alias, pk, pk))
query.where.add(lookup, AND)
condition, needed_inner = self.build_filter(
('%s__in' % trimmed_prefix, query),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
if contains_louter:
or_null_condition, _ = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where = EmptyWhere()
self.having = EmptyWhere()
def is_empty(self):
return isinstance(self.where, EmptyWhere) or isinstance(self.having, EmptyWhere)
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
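# Worked example of the arithmetic above (illustrative): starting from a
# query already limited to qs[5:15] (low_mark=5, high_mark=15), a further
# slice [2:4] calls set_limits(2, 4); high is handled first, giving
# high_mark = min(15, 5 + 4) = 9, then low_mark = min(9, 5 + 2) = 7, so the
# query now targets the half-open range [7, 9) of the original result.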
def clear_limits(self):
"""
Clears any existing limits.
"""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_clause(self):
"""
Removes all fields from SELECT clause.
"""
self.select = []
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_aggregate_mask(())
def clear_select_fields(self):
"""
Clears the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = []
def add_distinct_fields(self, *field_names):
"""
Adds and resolves the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Adds the given (model) fields to the select set. The field names are
added in the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
for name in field_names:
# Join promotion note - we must not remove any rows here, so
# if there are no existing joins, use an outer join.
_, targets, _, joins, path = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(targets, joins, path)
for target in targets:
self.select.append(SelectInfo((final_alias, target.column), target))
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
else:
names = sorted(opts.get_all_field_names() + list(self.extra)
+ list(self.aggregate_select))
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
self.remove_inherited_models()
def add_ordering(self, *ordering):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or ordinals,
corresponding to column positions in the 'select' list.
If 'ordering' is empty, all ordering is cleared from the query.
"""
errors = []
for item in ordering:
if not ORDER_PATTERN.match(item):
errors.append(item)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by.extend(ordering)
else:
self.default_ordering = False
def clear_ordering(self, force_empty):
"""
Removes any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = []
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expands the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
self.group_by = []
for col, _ in self.select:
self.group_by.append(col)
def add_count_column(self):
"""
Converts the query to do count(...) or count(distinct(pk)) in order to
get its size.
"""
if not self.distinct:
if not self.select:
count = self.aggregates_module.Count('*', is_summary=True)
else:
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select': %r" % self.select
count = self.aggregates_module.Count(self.select[0].col)
else:
opts = self.get_meta()
if not self.select:
count = self.aggregates_module.Count(
(self.join((None, opts.db_table, None)), opts.pk.column),
is_summary=True, distinct=True)
else:
# Because of SQL portability issues, multi-column, distinct
# counts need a sub-query -- see get_count() for details.
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select'."
count = self.aggregates_module.Count(self.select[0].col, distinct=True)
# Distinct handling is done in Count(), so don't do it at this
# level.
self.distinct = False
# Set only aggregate to be the count column.
# Clear out the select cache to reflect the new unmasked aggregates.
self._aggregates = {None: count}
self.set_aggregate_mask(None)
self.group_by = None
def add_select_related(self, fields):
"""
Sets up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
self.related_select_cols = []
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Adds data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = OrderedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_text(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != '%':
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is an OrderedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""
Remove any fields from the deferred loading set.
"""
self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = field_names, False
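# Sketch of the deferred-loading bookkeeping (hypothetical field names):
# after qs.defer('body') the stored state is ({'body'}, True); a subsequent
# .only('title') goes through this method, which subtracts the existing
# deferrals from the new names and stores ({'title'}, False), so only
# 'title' (plus the pk, which is always included) is loaded immediately.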
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to the set of field names that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
Callback used by get_loaded_field_names().
"""
target[model] = set(f.name for f in fields)
def set_aggregate_mask(self, names):
"Set the mask of aggregates that will actually be returned by the SELECT"
if names is None:
self.aggregate_select_mask = None
else:
self.aggregate_select_mask = set(names)
self._aggregate_select_cache = None
def append_aggregate_mask(self, names):
if self.aggregate_select_mask is not None:
self.set_aggregate_mask(set(names).union(self.aggregate_select_mask))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
We don't actually remove them from the Query since they might be used
later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
@property
def aggregate_select(self):
"""The OrderedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._aggregate_select_cache is not None:
return self._aggregate_select_cache
elif not self._aggregates:
return {}
elif self.aggregate_select_mask is not None:
self._aggregate_select_cache = OrderedDict(
(k, v) for k, v in self.aggregates.items()
if k in self.aggregate_select_mask
)
return self._aggregate_select_cache
else:
return self.aggregates
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self._extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = OrderedDict(
(k, v) for k, v in self.extra.items()
if k in self.extra_select_mask
)
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trims joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also sets the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Returns a lookup usable for doing outerq.filter(lookup=self). Also
returns whether the joins in the prefix contain a LEFT OUTER join.
"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [t for t in self.tables if t in self._lookup_joins or t == self.tables[0]]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == self.LOUTER:
contains_louter = True
self.unref_alias(lookup_tables[trimmed_paths])
# The path.join_field is a Rel; let's get the other side's field.
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
# Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for LEFT JOINs because we would
# miss those rows that have nothing on the outer side.
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != self.LOUTER:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
self.where_class, None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
# values in select_fields. Let's punt on this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
self.select = [SelectInfo((select_alias, f.column), f) for f in select_fields]
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
A helper to check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
if ((connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls)
and field.empty_strings_allowed):
return True
else:
return field.null
def get_order_dir(field, default='ASC'):
"""
Returns the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
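# Examples (illustrative): get_order_dir('-created') == ('created', 'DESC')
# and get_order_dir('created') == ('created', 'ASC'); with default='DESC'
# the bare and '-' prefixed forms swap directions.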
def add_to_dict(data, key, value):
"""
A helper function to add "value" to the set of values for "key", whether or
not "key" already exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
def is_reverse_o2o(field):
"""
A little helper to check if the given field is reverse-o2o. The field is
expected to be some sort of relation field or related object.
"""
return not hasattr(field, 'rel') and field.field.unique
def alias_diff(refcounts_before, refcounts_after):
"""
Given the before and after copies of refcounts, works out which aliases
have been added to the after copy.
"""
# Use -1 as default value so that any join that is created, then trimmed
# is seen as added.
return set(t for t in refcounts_after
if refcounts_after[t] > refcounts_before.get(t, -1))
class JoinPromoter(object):
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.outer_votes = {}
self.inner_votes = {}
def add_votes(self, inner_votes):
"""
Adds a single vote per item to self.inner_votes. The parameter can be
any iterable.
"""
for voted in inner_votes:
self.inner_votes[voted] = self.inner_votes.get(voted, 0) + 1
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.inner_votes.items():
# We must use outer joins in the OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
# if the rel_a join doesn't produce any results (for example a
# reverse foreign key with no rows, or a null direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == 'OR' and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == 'AND' or (
self.effective_connector == 'OR' and votes == self.num_children):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
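# A minimal, hypothetical usage sketch (editor's addition, not part of Django):
# it exercises the voting logic above with a stub query object. The _FakeQuery
# class and the alias names 'T1'/'T2' are invented for illustration only, and
# the sketch assumes the connector constants are the strings 'AND'/'OR'.
if __name__ == '__main__':
    class _FakeQuery:
        """Minimal stand-in that records which aliases get promoted/demoted."""
        def __init__(self):
            self.promoted = set()
            self.demoted = set()

        def promote_joins(self, aliases):
            self.promoted |= set(aliases)

        def demote_joins(self, aliases):
            self.demoted |= set(aliases)

    # Two children combined with OR, e.g. rel_a__col=1 | rel_b__col=2, where
    # the first child needs joins T1 and T2 and the second needs only T1.
    promoter = JoinPromoter(connector='OR', num_children=2, negated=False)
    promoter.add_votes(['T1', 'T2'])
    promoter.add_votes(['T1'])
    fake_query = _FakeQuery()
    promoter.update_join_types(fake_query)
    # T1 was voted for by every child, so it is demoted to INNER; T2 was voted
    # for by only one child, so it is promoted to LOUTER.
    assert fake_query.demoted == {'T1'}
    assert fake_query.promoted == {'T2'}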
|
theyaa/Impala
|
refs/heads/cdh5-trunk
|
thirdparty/hive-1.1.0-cdh5.7.0-SNAPSHOT/lib/py/thrift/transport/THttpClient.py
|
71
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from TTransport import *
from cStringIO import StringIO
import urlparse
import httplib
import warnings
class THttpClient(TTransportBase):
"""Http implementation of TTransport base."""
def __init__(self, uri_or_host, port=None, path=None):
"""THttpClient supports two different types constructor parameters.
THttpClient(host, port, path) - deprecated
THttpClient(uri)
Only the second supports https."""
if port is not None:
warnings.warn("Please use the THttpClient('http://host:port/path') syntax", DeprecationWarning, stacklevel=2)
self.host = uri_or_host
self.port = port
assert path
self.path = path
self.scheme = 'http'
else:
parsed = urlparse.urlparse(uri_or_host)
self.scheme = parsed.scheme
assert self.scheme in ('http', 'https')
if self.scheme == 'http':
self.port = parsed.port or httplib.HTTP_PORT
elif self.scheme == 'https':
self.port = parsed.port or httplib.HTTPS_PORT
self.host = parsed.hostname
self.path = parsed.path
self.__wbuf = StringIO()
self.__http = None
def open(self):
if self.scheme == 'http':
self.__http = httplib.HTTP(self.host, self.port)
else:
self.__http = httplib.HTTPS(self.host, self.port)
def close(self):
self.__http.close()
self.__http = None
def isOpen(self):
    return self.__http is not None
def read(self, sz):
return self.__http.file.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
if self.isOpen():
self.close()
    self.open()
# Pull data out of buffer
data = self.__wbuf.getvalue()
self.__wbuf = StringIO()
# HTTP request
self.__http.putrequest('POST', self.path)
# Write headers
self.__http.putheader('Host', self.host)
self.__http.putheader('Content-Type', 'application/x-thrift')
self.__http.putheader('Content-Length', str(len(data)))
self.__http.endheaders()
# Write payload
self.__http.send(data)
# Get reply to flush the request
self.code, self.message, self.headers = self.__http.getreply()
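# Editor's addition: a hypothetical usage sketch, not part of the Thrift
# library. The URL below is an invented placeholder; flush() issues a real
# HTTP POST, so this only succeeds if a Thrift-over-HTTP service is listening
# at that address.
if __name__ == '__main__':
  transport = THttpClient('http://localhost:9090/service')
  transport.open()
  # A real client would let a Thrift protocol encoder fill the write buffer.
  transport.write('<serialized thrift payload>')
  transport.flush()   # sends the buffered payload as a single POST request
  print transport.code, transport.message
  transport.close()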
|
jantman/python-mcollective
|
refs/heads/master
|
tests/integration/test_with_b64.py
|
2
|
import os
from pymco.test import ctxt
from . import base
FIXTURES_PATH = os.path.join(ctxt.ROOT, 'fixtures')
class Base64TestCase(base.IntegrationTestCase):
"""ActiveMQ integration test case."""
CTXT = {
'plugin.activemq.pool.1.port': 61614,
'plugin.activemq.pool.1.password': 'marionette',
'plugin.activemq.pool.1.base64': 'yes',
}
class TestWithBase64MCo20x(base.MCollective20x, Base64TestCase):
"""MCollective integration test case."""
class TestWithBase64MCo22x(base.MCollective22x, Base64TestCase):
"""MCollective integration test case."""
class TestWithBase64MCo23x(base.MCollective23x, Base64TestCase):
"""MCollective integration test case."""
class TestWithBase64MCo24x(base.MCollective24x, Base64TestCase):
"""MCollective integration test case."""
|
libscie/liberator
|
refs/heads/master
|
liberator/lib/python3.6/site-packages/appdirs.py
|
177
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 3)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific state dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user state directories are:
Mac OS X: same as user_data_dir
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
to extend the XDG spec and support $XDG_STATE_HOME.
That means, by default "~/.local/state/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname=None, appauthor=None, version=None,
roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_state_dir(self):
return user_state_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
|
dvliman/jaikuengine
|
refs/heads/master
|
.google_appengine/lib/django-1.4/tests/modeltests/model_inheritance_same_model_name/tests.py
|
92
|
from __future__ import absolute_import
from django.test import TestCase
from ..model_inheritance.models import Title
class InheritanceSameModelNameTests(TestCase):
def setUp(self):
# The Title model has distinct accessors for both
# model_inheritance.Copy and model_inheritance_same_model_name.Copy
# models.
self.title = Title.objects.create(title='Lorem Ipsum')
def test_inheritance_related_name(self):
from modeltests.model_inheritance.models import Copy
self.assertEqual(
self.title.attached_model_inheritance_copy_set.create(
content='Save $ on V1agr@',
url='http://v1agra.com/',
title='V1agra is spam',
), Copy.objects.get(content='Save $ on V1agr@'))
def test_inheritance_with_same_model_name(self):
from modeltests.model_inheritance_same_model_name.models import Copy
self.assertEqual(
self.title.attached_model_inheritance_same_model_name_copy_set.create(
content='The Web framework for perfectionists with deadlines.',
url='http://www.djangoproject.com/',
title='Django Rocks'
), Copy.objects.get(content='The Web framework for perfectionists with deadlines.'))
def test_related_name_attribute_exists(self):
# The Post model doesn't have an attribute called 'attached_%(app_label)s_%(class)s_set'.
self.assertEqual(hasattr(self.title, 'attached_%(app_label)s_%(class)s_set'), False)
|
RouxRC/weboob
|
refs/heads/master
|
modules/amazon/fr/browser.py
|
5
|
# -*- coding: utf-8 -*-
# Copyright(C) 2015 Christophe Lampin
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.browser import URL
from ..browser import Amazon
from .pages import HomePage, LoginPage, AmazonPage, HistoryPage, \
OrderOldPage, OrderNewPage
__all__ = ['AmazonFR']
class AmazonFR(Amazon):
BASEURL = 'https://www.amazon.fr'
CURRENCY = u'€'
home = URL(r'/$', r'.*/homepage\.html.*', HomePage)
login = URL(r'/ap/signin/.*$', LoginPage)
history = URL(r'/gp/css/order-history.*$',
r'/gp/your-account/order-history.*$', HistoryPage)
order_old = URL(r'/gp/css/summary.*$',
r'/gp/css/summary/edit.html\?orderID=%\(order_id\)s',
r'/gp/digital/your-account/order-summary.html.*$',
r'/gp/digital/your-account/orderPe-summary.html\?orderID=%\(order_id\)s',
OrderOldPage)
order_new = URL(r'/gp/css/summary.*$',
r'/gp/your-account/order-details.*$',
r'/gp/your-account/order-details\?orderID=%\(order_id\)s',
OrderNewPage)
unknown = URL(r'/.*$', AmazonPage)
|
AndrewRook/game_designer
|
refs/heads/master
|
card_game/models.py
|
1
|
from django.db import models
# Create your models here.
class Card(models.Model):
creature = 'CR'
spell = 'SP'
card_type_choices = ((creature,'Creature'),(spell,'Spell'))
type = models.CharField(max_length=2,choices=card_type_choices,default=creature)
name = models.CharField(max_length=64)
cost = models.IntegerField(default=0)
art = models.ImageField(upload_to='card_images',blank=True)
text = models.TextField()
power = models.IntegerField(default=0)
toughness = models.IntegerField(default=1)
def __unicode__(self):
return self.name
class Unique_Card(models.Model):
card_id = models.ForeignKey(Card)
def __unicode__(self):
return self.card_id
class Unique_Version(models.Model):
name = models.CharField(max_length=128, unique=True)
description = models.CharField(max_length=256)
creation_date = models.DateField(auto_now_add=True)
def __unicode__(self):
return self.name
class Version(models.Model):
version_number = models.ForeignKey(Unique_Version)
card_id = models.ForeignKey(Card)
def __unicode__(self):
return self.version_number
|
myzhan/bottle-doc-zh-cn
|
refs/heads/master
|
test/testall.py
|
4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys, os, glob
test_root = os.path.dirname(os.path.abspath(__file__))
test_files = glob.glob(os.path.join(test_root, 'test_*.py'))
os.chdir(test_root)
sys.path.insert(0, os.path.dirname(test_root))
sys.path.insert(0, test_root)
test_names = [os.path.basename(name)[:-3] for name in test_files]
if 'help' in sys.argv or '-h' in sys.argv:
sys.stdout.write('''Command line arguments:
fast: Skip server adapter tests.
verbose: Print tests even if they pass.
coverage: Measure code coverage.
    html: Create an HTML coverage report. Requires 'coverage'
clean: Delete coverage or temporary files
''')
sys.exit(0)
if 'fast' in sys.argv:
    sys.stderr.write("Warning: The 'fast' keyword skips server tests.\n")
test_names.remove('test_server')
cov = None
if 'coverage' in sys.argv:
import coverage
cov = coverage.coverage(data_suffix=True, branch=True)
cov.start()
suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
def run():
import bottle
bottle.debug(True)
vlevel = 2 if 'verbose' in sys.argv else 0
result = unittest.TextTestRunner(verbosity=vlevel).run(suite)
if cov:
cov.stop()
cov.save()
# Recreate coverage object so new files created in other processes are
# recognized
cnew = coverage.coverage(data_suffix=True, branch=True)
cnew.combine()
cnew.report(morfs=['bottle.py']+test_files, show_missing=False)
if 'html' in sys.argv:
            print("")
cnew.html_report(morfs=['bottle.py']+test_files, directory='../build/coverage')
sys.exit((result.errors or result.failures) and 1 or 0)
if __name__ == '__main__':
run()
|
mgit-at/ansible
|
refs/heads/devel
|
lib/ansible/modules/crypto/openssl_pkcs12.py
|
7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Copyright (c) 2017 Guillaume Delpierre <gde@llew.me>
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openssl_pkcs12
author: "Guillaume Delpierre (@gdelpierre)"
version_added: "2.7"
short_description: Generate OpenSSL PKCS#12 archive.
description:
- This module allows one to (re-)generate PKCS#12.
requirements:
- python-pyOpenSSL
options:
action:
default: export
choices: ['parse', 'export']
description:
- C(export) or C(parse) a PKCS#12.
ca_certificates:
description:
            - List of CA certificates to include.
certificate_path:
description:
- The path to read certificates and private keys from. Must be in PEM format.
force:
default: False
type: bool
description:
- Should the file be regenerated even if it already exists.
friendly_name:
aliases: ['name']
description:
- Specifies the friendly name for the certificate and private key.
iter_size:
default: 2048
description:
- Number of times to repeat the encryption step.
maciter_size:
default: 1
description:
- Number of times to repeat the MAC step.
passphrase:
description:
- The PKCS#12 password.
path:
required: True
description:
- Filename to write the PKCS#12 file to.
privatekey_passphrase:
description:
- Passphrase source to decrypt any input private keys with.
privatekey_path:
description:
- File to read private key from.
state:
default: 'present'
choices: ['present', 'absent']
description:
- Whether the file should exist or not.
All parameters except C(path) are ignored when state is C(absent).
src:
description:
- PKCS#12 file path to parse.
extends_documentation_fragment:
- files
'''
EXAMPLES = '''
- name: 'Generate PKCS#12 file'
openssl_pkcs12:
action: export
path: '/opt/certs/ansible.p12'
friendly_name: 'raclette'
privatekey_path: '/opt/certs/keys/key.pem'
certificate_path: '/opt/certs/cert.pem'
ca_certificates: '/opt/certs/ca.pem'
state: present
- name: 'Change PKCS#12 file permission'
openssl_pkcs12:
action: export
path: '/opt/certs/ansible.p12'
friendly_name: 'raclette'
privatekey_path: '/opt/certs/keys/key.pem'
certificate_path: '/opt/certs/cert.pem'
ca_certificates: '/opt/certs/ca.pem'
state: present
mode: 0600
- name: 'Regen PKCS#12 file'
openssl_pkcs12:
action: export
src: '/opt/certs/ansible.p12'
path: '/opt/certs/ansible.p12'
friendly_name: 'raclette'
privatekey_path: '/opt/certs/keys/key.pem'
certificate_path: '/opt/certs/cert.pem'
ca_certificates: '/opt/certs/ca.pem'
state: present
mode: 0600
force: True
- name: 'Dump/Parse PKCS#12 file'
openssl_pkcs12:
action: parse
src: '/opt/certs/ansible.p12'
path: '/opt/certs/ansible.pem'
state: present
- name: 'Remove PKCS#12 file'
openssl_pkcs12:
path: '/opt/certs/ansible.p12'
state: absent
'''
RETURN = '''
filename:
    description: Path to the generated PKCS#12 file.
returned: changed or success
type: string
sample: /opt/certs/ansible.p12
privatekey:
description: Path to the TLS/SSL private key the public key was generated from
returned: changed or success
type: string
sample: /etc/ssl/private/ansible.com.pem
'''
import stat
import os
try:
from OpenSSL import crypto
except ImportError:
pyopenssl_found = False
else:
pyopenssl_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_bytes, to_native
class PkcsError(crypto_utils.OpenSSLObjectError):
pass
class Pkcs(crypto_utils.OpenSSLObject):
def __init__(self, module):
super(Pkcs, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.action = module.params['action']
self.ca_certificates = module.params['ca_certificates']
self.certificate_path = module.params['certificate_path']
self.friendly_name = module.params['friendly_name']
self.iter_size = module.params['iter_size']
self.maciter_size = module.params['maciter_size']
self.passphrase = module.params['passphrase']
self.pkcs12 = None
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.privatekey_path = module.params['privatekey_path']
self.src = module.params['src']
self.mode = module.params['mode']
if not self.mode:
self.mode = 0o400
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(Pkcs, self).check(module, perms_required)
def _check_pkey_passphrase():
if self.privatekey_passphrase:
try:
crypto_utils.load_privatekey(self.path,
self.privatekey_passphrase)
except crypto.Error:
return False
return True
if not state_and_perms:
return state_and_perms
        return _check_pkey_passphrase()
def dump(self):
"""Serialize the object into a dictionary."""
result = {
'filename': self.path,
}
if self.privatekey_path:
result['privatekey_path'] = self.privatekey_path
return result
def generate(self, module):
"""Generate PKCS#12 file archive."""
self.pkcs12 = crypto.PKCS12()
try:
self.remove()
except PkcsError as exc:
module.fail_json(msg=to_native(exc))
if self.ca_certificates:
ca_certs = [crypto_utils.load_certificate(ca_cert) for ca_cert
in self.ca_certificates]
self.pkcs12.set_ca_certificates(ca_certs)
if self.certificate_path:
self.pkcs12.set_certificate(crypto_utils.load_certificate(
self.certificate_path))
if self.friendly_name:
self.pkcs12.set_friendlyname(to_bytes(self.friendly_name))
if self.privatekey_path:
self.pkcs12.set_privatekey(crypto_utils.load_privatekey(
self.privatekey_path,
self.privatekey_passphrase)
)
try:
pkcs12_file = os.open(self.path,
os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
self.mode)
os.write(pkcs12_file, self.pkcs12.export(self.passphrase,
self.iter_size, self.maciter_size))
os.close(pkcs12_file)
except (IOError, OSError) as exc:
self.remove()
raise PkcsError(exc)
def parse(self, module):
"""Read PKCS#12 file."""
try:
self.remove()
p12 = crypto.load_pkcs12(open(self.src, 'rb').read(),
self.passphrase)
pkey = crypto.dump_privatekey(crypto.FILETYPE_PEM,
p12.get_privatekey())
crt = crypto.dump_certificate(crypto.FILETYPE_PEM,
p12.get_certificate())
pkcs12_file = os.open(self.path,
os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
self.mode)
os.write(pkcs12_file, b'%s%s' % (pkey, crt))
os.close(pkcs12_file)
except IOError as exc:
self.remove()
raise PkcsError(exc)
def main():
argument_spec = dict(
action=dict(type='str', default='export',
choices=['parse', 'export']),
ca_certificates=dict(type='list', elements='path'),
certificate_path=dict(type='path'),
force=dict(type='bool', default=False),
friendly_name=dict(type='str', aliases=['name']),
iter_size=dict(type='int', default=2048),
maciter_size=dict(type='int', default=1),
passphrase=dict(type='str', no_log=True),
path=dict(type='path', required=True),
privatekey_passphrase=dict(type='str', no_log=True),
privatekey_path=dict(type='path'),
state=dict(type='str', default='present',
choices=['present', 'absent']),
src=dict(type='path'),
)
required_if = [
['action', 'parse', ['src']],
]
required_together = [
['privatekey_path', 'friendly_name'],
]
module = AnsibleModule(
add_file_common_args=True,
argument_spec=argument_spec,
required_if=required_if,
required_together=required_together,
supports_check_mode=True,
)
if not pyopenssl_found:
module.fail_json(msg='The python pyOpenSSL library is required')
base_dir = os.path.dirname(module.params['path'])
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
msg='The directory %s does not exist or '
'the path is not a directory' % base_dir
)
pkcs12 = Pkcs(module)
changed = False
if module.params['state'] == 'present':
if module.check_mode:
result = pkcs12.dump()
result['changed'] = module.params['force'] or not pkcs12.check(module)
module.exit_json(**result)
try:
if not pkcs12.check(module, perms_required=False) or module.params['force']:
if module.params['action'] == 'export':
if not module.params['friendly_name']:
module.fail_json(msg='Friendly_name is required')
pkcs12.generate(module)
changed = True
else:
pkcs12.parse(module)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, changed):
changed = True
except PkcsError as exc:
module.fail_json(msg=to_native(exc))
else:
if module.check_mode:
result = pkcs12.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
if os.path.exists(module.params['path']):
try:
pkcs12.remove()
changed = True
except PkcsError as exc:
module.fail_json(msg=to_native(exc))
result = pkcs12.dump()
result['changed'] = changed
if os.path.exists(module.params['path']):
file_mode = "%04o" % stat.S_IMODE(os.stat(module.params['path']).st_mode)
result['mode'] = file_mode
module.exit_json(**result)
if __name__ == '__main__':
main()
|
kifcaliph/odoo
|
refs/heads/8.0
|
addons/hr_expense/report/hr_expense_report.py
|
287
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_expense_report(osv.osv):
_name = "hr.expense.report"
_description = "Expenses Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.date('Date ', readonly=True),
'create_date': fields.datetime('Creation Date', readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'journal_id': fields.many2one('account.journal', 'Force Journal', readonly=True),
'product_qty':fields.float('Product Quantity', readonly=True),
'employee_id': fields.many2one('hr.employee', "Employee's Name", readonly=True),
'date_confirm': fields.date('Confirmation Date', readonly=True),
'date_valid': fields.date('Validation Date', readonly=True),
'department_id':fields.many2one('hr.department','Department', readonly=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'user_id':fields.many2one('res.users', 'Validation User', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'price_total':fields.float('Total Price', readonly=True, digits_compute=dp.get_precision('Account')),
'delay_valid':fields.float('Delay to Valid', readonly=True),
'delay_confirm':fields.float('Delay to Confirm', readonly=True),
'analytic_account': fields.many2one('account.analytic.account','Analytic account',readonly=True),
'price_average':fields.float('Average Price', readonly=True, digits_compute=dp.get_precision('Account')),
'nbr':fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines
'no_of_products':fields.integer('# of Products', readonly=True),
'no_of_account':fields.integer('# of Accounts', readonly=True),
'state': fields.selection([
('draft', 'Draft'),
('confirm', 'Waiting confirmation'),
('accepted', 'Accepted'),
('done', 'Done'),
('cancelled', 'Cancelled')],
'Status', readonly=True),
}
_order = 'date desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'hr_expense_report')
cr.execute("""
create or replace view hr_expense_report as (
select
min(l.id) as id,
s.date as date,
s.create_date as create_date,
s.employee_id,
s.journal_id,
s.currency_id,
s.date_confirm as date_confirm,
s.date_valid as date_valid,
s.user_valid as user_id,
s.department_id,
avg(extract('epoch' from age(s.date_valid,s.date)))/(3600*24) as delay_valid,
avg(extract('epoch' from age(s.date_valid,s.date_confirm)))/(3600*24) as delay_confirm,
l.product_id as product_id,
l.analytic_account as analytic_account,
sum(l.unit_quantity * u.factor) as product_qty,
s.company_id as company_id,
sum(l.unit_quantity*l.unit_amount) as price_total,
(sum(l.unit_quantity*l.unit_amount)/sum(case when l.unit_quantity=0 or u.factor=0 then 1 else l.unit_quantity * u.factor end))::decimal(16,2) as price_average,
count(*) as nbr,
(select unit_quantity from hr_expense_line where id=l.id and product_id is not null) as no_of_products,
(select analytic_account from hr_expense_line where id=l.id and analytic_account is not null) as no_of_account,
s.state
from hr_expense_line l
left join hr_expense_expense s on (s.id=l.expense_id)
left join product_uom u on (u.id=l.uom_id)
group by
s.date,
s.create_date,
s.date_confirm,
s.date_valid,
l.product_id,
l.analytic_account,
s.currency_id,
s.user_valid,
s.department_id,
l.uom_id,
l.id,
s.state,
s.journal_id,
s.company_id,
s.employee_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
wkschwartz/django
|
refs/heads/stable/3.2.x
|
django/db/models/sql/datastructures.py
|
11
|
"""
Useful auxiliary data structures for query construction. Not useful outside
the SQL domain.
"""
from django.db.models.sql.constants import INNER, LOUTER
class MultiJoin(Exception):
"""
Used by join construction code to indicate the point at which a
multi-valued join was attempted (if the caller wants to treat that
exceptionally).
"""
def __init__(self, names_pos, path_with_names):
self.level = names_pos
        # The path travelled; this includes the path to the multijoin.
self.names_with_path = path_with_names
class Empty:
pass
class Join:
"""
Used by sql.Query and sql.SQLCompiler to generate JOIN clauses into the
FROM entry. For example, the SQL generated could be
LEFT OUTER JOIN "sometable" T1 ON ("othertable"."sometable_id" = "sometable"."id")
This class is primarily used in Query.alias_map. All entries in alias_map
must be Join compatible by providing the following attributes and methods:
- table_name (string)
- table_alias (possible alias for the table, can be None)
- join_type (can be None for those entries that aren't joined from
anything)
- parent_alias (which table is this join's parent, can be None similarly
to join_type)
- as_sql()
- relabeled_clone()
"""
def __init__(self, table_name, parent_alias, table_alias, join_type,
join_field, nullable, filtered_relation=None):
# Join table
self.table_name = table_name
self.parent_alias = parent_alias
# Note: table_alias is not necessarily known at instantiation time.
self.table_alias = table_alias
# LOUTER or INNER
self.join_type = join_type
# A list of 2-tuples to use in the ON clause of the JOIN.
# Each 2-tuple will create one join condition in the ON clause.
self.join_cols = join_field.get_joining_columns()
        # The field (or ForeignObjectRel in the reverse join case) along which
        # the join is made.
self.join_field = join_field
        # Is this join nullable?
self.nullable = nullable
self.filtered_relation = filtered_relation
def as_sql(self, compiler, connection):
"""
Generate the full
LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params
clause for this join.
"""
join_conditions = []
params = []
qn = compiler.quote_name_unless_alias
qn2 = connection.ops.quote_name
# Add a join condition for each pair of joining columns.
for lhs_col, rhs_col in self.join_cols:
join_conditions.append('%s.%s = %s.%s' % (
qn(self.parent_alias),
qn2(lhs_col),
qn(self.table_alias),
qn2(rhs_col),
))
# Add a single condition inside parentheses for whatever
# get_extra_restriction() returns.
extra_cond = self.join_field.get_extra_restriction(
compiler.query.where_class, self.table_alias, self.parent_alias)
if extra_cond:
extra_sql, extra_params = compiler.compile(extra_cond)
join_conditions.append('(%s)' % extra_sql)
params.extend(extra_params)
if self.filtered_relation:
extra_sql, extra_params = compiler.compile(self.filtered_relation)
if extra_sql:
join_conditions.append('(%s)' % extra_sql)
params.extend(extra_params)
if not join_conditions:
# This might be a rel on the other end of an actual declared field.
declared_field = getattr(self.join_field, 'field', self.join_field)
raise ValueError(
"Join generated an empty ON clause. %s did not yield either "
"joining columns or extra restrictions." % declared_field.__class__
)
on_clause_sql = ' AND '.join(join_conditions)
alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias)
sql = '%s %s%s ON (%s)' % (self.join_type, qn(self.table_name), alias_str, on_clause_sql)
return sql, params
def relabeled_clone(self, change_map):
new_parent_alias = change_map.get(self.parent_alias, self.parent_alias)
new_table_alias = change_map.get(self.table_alias, self.table_alias)
if self.filtered_relation is not None:
filtered_relation = self.filtered_relation.clone()
filtered_relation.path = [change_map.get(p, p) for p in self.filtered_relation.path]
else:
filtered_relation = None
return self.__class__(
self.table_name, new_parent_alias, new_table_alias, self.join_type,
self.join_field, self.nullable, filtered_relation=filtered_relation,
)
@property
def identity(self):
return (
self.__class__,
self.table_name,
self.parent_alias,
self.join_field,
self.filtered_relation,
)
def __eq__(self, other):
if not isinstance(other, Join):
return NotImplemented
return self.identity == other.identity
def __hash__(self):
return hash(self.identity)
def equals(self, other, with_filtered_relation):
if with_filtered_relation:
return self == other
return self.identity[:-1] == other.identity[:-1]
def demote(self):
new = self.relabeled_clone({})
new.join_type = INNER
return new
def promote(self):
new = self.relabeled_clone({})
new.join_type = LOUTER
return new
class BaseTable:
"""
    The BaseTable class is used for base table references in the FROM clause. For
example, the SQL "foo" in
SELECT * FROM "foo" WHERE somecond
could be generated by this class.
"""
join_type = None
parent_alias = None
filtered_relation = None
def __init__(self, table_name, alias):
self.table_name = table_name
self.table_alias = alias
def as_sql(self, compiler, connection):
alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias)
base_sql = compiler.quote_name_unless_alias(self.table_name)
return base_sql + alias_str, []
def relabeled_clone(self, change_map):
return self.__class__(self.table_name, change_map.get(self.table_alias, self.table_alias))
@property
def identity(self):
return self.__class__, self.table_name, self.table_alias
def __eq__(self, other):
if not isinstance(other, BaseTable):
return NotImplemented
return self.identity == other.identity
def __hash__(self):
return hash(self.identity)
def equals(self, other, with_filtered_relation):
return self.identity == other.identity
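# A small, hypothetical sketch (editor's addition, not part of Django) showing
# that Join equality is based on `identity`, which deliberately ignores the
# table alias. The _FakeJoinField stub and the table/alias names are invented
# for illustration only.
if __name__ == '__main__':
    class _FakeJoinField:
        def get_joining_columns(self):
            return (('id', 'parent_id'),)

    field = _FakeJoinField()
    j1 = Join('child', 'T1', 'T2', INNER, field, nullable=False)
    j2 = Join('child', 'T1', 'T5', INNER, field, nullable=False)
    # Different table aliases (T2 vs. T5) but the same identity -> equal.
    assert j1 == j2 and hash(j1) == hash(j2)
    # promote()/demote() return relabeled clones with the join type switched.
    assert j1.promote().join_type == LOUTER
    assert j1.promote().demote().join_type == INNER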
|
daajoe/doorbell
|
refs/heads/master
|
pjproject/tests/pjsua/scripts-sendto/410_fmtp_amrnb_offer_octet_align.py
|
42
|
# $Id: 410_fmtp_amrnb_offer_octet_align.py 3664 2011-07-19 03:42:28Z nanang $
import inc_sip as sip
import inc_sdp as sdp
# Answer for codec AMR should contain fmtp octet-align=1
sdp = \
"""
v=0
o=- 3428650655 3428650655 IN IP4 192.168.1.9
s=pjmedia
c=IN IP4 192.168.1.9
t=0 0
a=X-nat:0
m=audio 4000 RTP/AVP 99 101
a=rtcp:4001 IN IP4 192.168.1.9
a=rtpmap:99 AMR/8000
a=fmtp:99 octet-align=1
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
"""
pjsua_args = "--null-audio --auto-answer 200 --add-codec AMR"
extra_headers = ""
include = ["octet-align=1"] # response must include 'octet-align=1'
exclude = []
sendto_cfg = sip.SendtoCfg("AMR negotiation should respond with fmtp 'octet-align=1'", pjsua_args, sdp, 200,
extra_headers=extra_headers,
resp_inc=include, resp_exc=exclude)
|
nojhan/weboob-devel
|
refs/heads/master
|
modules/barclays/browser.py
|
6
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.deprecated.browser import Browser, BrowserIncorrectPassword
from .pages import LoginPage, Login2Page, IndexPage, AccountsPage, TransactionsPage, \
CardPage, ValuationPage, LoanPage, MarketPage, AssurancePage
__all__ = ['Barclays']
class Barclays(Browser):
PROTOCOL = 'https'
DOMAIN = 'www.barclays.fr'
PAGES = {'https?://.*.barclays.fr/\d-index.html': IndexPage,
'https://.*.barclays.fr/barclaysnetV2/logininstit.do.*': LoginPage,
'https://.*.barclays.fr/barclaysnetV2/loginSecurite.do.*': Login2Page,
'https://.*.barclays.fr/barclaysnetV2/tbord.do.*': AccountsPage,
'https://.*.barclays.fr/barclaysnetV2/releve.do.*': TransactionsPage,
'https://.*.barclays.fr/barclaysnetV2/cartes.do.*': CardPage,
'https://.*.barclays.fr/barclaysnetV2/valuationViewBank.do.*': ValuationPage,
'https://.*.barclays.fr/barclaysnetV2/pret.do.*': LoanPage,
'https://.*.barclays.fr/barclaysnetV2/titre.do.*': MarketPage,
'https://.*.barclays.fr/barclaysnetV2/assurance.do.*': AssurancePage,
}
def __init__(self, secret, *args, **kwargs):
self.secret = secret
Browser.__init__(self, *args, **kwargs)
def is_logged(self):
return self.page is not None and not self.is_on_page((LoginPage, IndexPage, Login2Page))
def home(self):
if self.is_logged():
self.location('tbord.do')
else:
self.login()
def login(self):
"""
Attempt to log in.
Note: this method does nothing if we are already logged in.
"""
assert isinstance(self.username, basestring)
assert isinstance(self.password, basestring)
if self.is_logged():
return
if not self.is_on_page(LoginPage):
self.location('https://b-net.barclays.fr/barclaysnetV2/logininstit.do?lang=fr&nodoctype=0', no_login=True)
self.page.login(self.username, self.password)
if not self.page.has_redirect():
raise BrowserIncorrectPassword()
self.location('loginSecurite.do', no_login=True)
self.page.login(self.secret)
if not self.is_logged():
raise BrowserIncorrectPassword()
def get_accounts_list(self):
if not self.is_on_page(AccountsPage):
self.location('tbord.do')
return self.page.get_list()
def get_account(self, id):
assert isinstance(id, basestring)
l = self.get_accounts_list()
for a in l:
if a.id == id:
return a
return None
def get_history(self, account):
if not self.is_on_page(AccountsPage):
self.location('tbord.do')
self.location(account._link)
assert self.is_on_page((TransactionsPage, ValuationPage, LoanPage, MarketPage, AssurancePage))
for tr in self.page.get_history():
yield tr
for tr in self.get_card_operations(account):
yield tr
def get_card_operations(self, account):
for card in account._card_links:
if not self.is_on_page(AccountsPage):
self.location('tbord.do')
self.location(card)
assert self.is_on_page(CardPage)
for tr in self.page.get_history():
yield tr
|
Teagan42/home-assistant
|
refs/heads/dev
|
homeassistant/components/fan/device_trigger.py
|
8
|
"""Provides device automations for Fan."""
from typing import List
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType, state
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN
TRIGGER_TYPES = {"turned_on", "turned_off"}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
}
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers for Fan devices."""
registry = await entity_registry.async_get_registry(hass)
triggers = []
    # Get all the integration entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
# Add triggers for each entity that belongs to this integration
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turned_on",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turned_off",
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
config = TRIGGER_SCHEMA(config)
if config[CONF_TYPE] == "turned_on":
from_state = STATE_OFF
to_state = STATE_ON
else:
from_state = STATE_ON
to_state = STATE_OFF
state_config = {
state.CONF_PLATFORM: "state",
CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state.CONF_FROM: from_state,
state.CONF_TO: to_state,
}
state_config = state.TRIGGER_SCHEMA(state_config)
return await state.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
|
Karel-van-de-Plassche/bokeh
|
refs/heads/master
|
tests/integration/interaction/test_range_min_max.py
|
5
|
from __future__ import absolute_import
from bokeh.io import save
from bokeh.models import (
BoxZoomTool,
ColumnDataSource,
DataRange1d,
PanTool,
Plot,
Range1d,
Rect,
LinearAxis
)
from selenium.webdriver.common.action_chains import ActionChains
from tests.integration.utils import has_no_console_errors, wait_for_canvas_resize
import pytest
pytestmark = pytest.mark.integration
def make_plot(xr=None, yr=None):
if xr is None:
x_range = Range1d(0, 3, bounds=None)
else:
x_range = xr
if yr is None:
y_range = Range1d(0, 3, bounds=None)
else:
y_range = yr
source = ColumnDataSource(dict(x=[1, 2], y=[1, 1]))
# explicitly set plot.id so that the plot can be accessed from Bokeh.index in browser
plot = Plot(id='plot-id', plot_height=400, plot_width=400, x_range=x_range, y_range=y_range, min_border=0)
plot.add_glyph(source, Rect(x='x', y='y', width=0.9, height=0.9))
plot.add_tools(PanTool(), BoxZoomTool())
plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')
return plot
def pan_plot(selenium, pan_x=None, pan_y=None):
canvas = selenium.find_element_by_tag_name('canvas')
wait_for_canvas_resize(canvas, selenium)
# Enable the pan tool
pan_buttons = selenium.find_elements_by_css_selector('.bk-button-bar-list[type="pan"] .bk-toolbar-button')
pan_button = pan_buttons[0]
if 'active' not in pan_button.get_attribute('class'):
pan_button.click()
actions = ActionChains(selenium)
actions.move_to_element_with_offset(canvas, 200, 200)
actions.click_and_hold()
actions.move_by_offset(pan_x, pan_y)
actions.release()
actions.perform()
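# Editor's hedged annotation: pan_x/pan_y above are pixel offsets for the drag
# gesture. Dragging right (positive pan_x) shifts the visible x-range toward
# smaller values, and dragging down (positive pan_y) shifts the visible y-range
# toward larger values, which is why the bound tests below pan the way they do.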
def test_x_range_does_not_pan_left_of_x_min(output_file_url, selenium):
x_range_min = -1
plot = make_plot(xr=Range1d(0, 3, bounds=(x_range_min, None)))
save(plot)
selenium.get(output_file_url)
assert has_no_console_errors(selenium)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=150, pan_y=0)
new_range_start = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.start)"""))
selenium.switch_to_alert().dismiss()
assert round(new_range_start) == x_range_min
def test_x_range_does_not_pan_right_of_x_max(output_file_url, selenium):
x_range_max = 4
plot = make_plot(xr=Range1d(0, 3, bounds=(None, x_range_max)))
save(plot)
selenium.get(output_file_url)
assert has_no_console_errors(selenium)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=-150, pan_y=0)
new_range_end = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.end)"""))
selenium.switch_to_alert().dismiss() # This is not necessary but assists debugging
assert round(new_range_end) == x_range_max
def test_y_range_does_not_pan_below_y_min(output_file_url, selenium):
y_range_min = -1
plot = make_plot(yr=Range1d(0, 3, bounds=(y_range_min, None)))
save(plot)
selenium.get(output_file_url)
assert has_no_console_errors(selenium)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=50, pan_y=-150)
new_range_start = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.y_range.start)"""))
selenium.switch_to_alert().dismiss() # This is not necessary but assists debugging
assert round(new_range_start) == y_range_min
def test_y_range_does_not_pan_above_y_max(output_file_url, selenium):
y_range_max = 4
plot = make_plot(yr=Range1d(0, 3, bounds=(None, y_range_max)))
save(plot)
selenium.get(output_file_url)
assert has_no_console_errors(selenium)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=50, pan_y=150)
new_range_end = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.y_range.end)"""))
selenium.switch_to_alert().dismiss() # This is not necessary but assists debugging
assert round(new_range_end) == y_range_max
############################
# Test reversed ranges
############################
def test_reversed_x_range_does_not_pan_right_of_x_min(output_file_url, selenium):
x_range_min = -1
plot = make_plot(xr=Range1d(3, 0, bounds=(x_range_min, None)))
save(plot)
selenium.get(output_file_url)
assert has_no_console_errors(selenium)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=-150, pan_y=0)
new_range_start = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.min)"""))
selenium.switch_to_alert().dismiss()
assert round(new_range_start) == x_range_min
def test_reversed_x_range_does_not_pan_left_of_x_max(output_file_url, selenium):
x_range_max = 4
plot = make_plot(xr=Range1d(3, 0, bounds=(None, x_range_max)))
save(plot)
selenium.get(output_file_url)
assert has_no_console_errors(selenium)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=150, pan_y=0)
new_range_end = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.max)"""))
selenium.switch_to_alert().dismiss() # This is not necessary but assists debugging
assert round(new_range_end) == x_range_max
def test_reversed_y_range_does_not_pan_above_y_min(output_file_url, selenium):
y_range_min = -1
plot = make_plot(yr=Range1d(3, 0, bounds=(y_range_min, None)))
save(plot)
selenium.get(output_file_url)
assert has_no_console_errors(selenium)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=50, pan_y=150)
new_range_start = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.y_range.min)"""))
selenium.switch_to_alert().dismiss()
assert round(new_range_start) == y_range_min
def test_reversed_y_range_does_not_pan_below_y_max(output_file_url, selenium):
y_range_max = 4
plot = make_plot(yr=Range1d(3, 0, bounds=(None, y_range_max)))
save(plot)
selenium.get(output_file_url)
assert has_no_console_errors(selenium)
# Pan plot and test for new range value
pan_plot(selenium, pan_x=50, pan_y=-150)
new_range_end = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.y_range.max)"""))
selenium.switch_to_alert().dismiss()
assert round(new_range_end) == y_range_max
############################
# Test auto bounds
############################
def zoom_plot(selenium):
canvas = selenium.find_element_by_tag_name('canvas')
wait_for_canvas_resize(canvas, selenium)
# Enable the box zoom tool
pan_buttons = selenium.find_elements_by_css_selector('.bk-button-bar-list[type="pan"] .bk-toolbar-button')
zoom_button = pan_buttons[1]
if 'active' not in zoom_button.get_attribute('class'):
zoom_button.click()
actions = ActionChains(selenium)
actions.move_to_element_with_offset(canvas, 30, 30)
actions.click_and_hold()
actions.move_by_offset(200, 200)
actions.release()
actions.perform()
def _assert_autorange_prevents_panning_but_can_zoom(output_file_url, selenium):
selenium.get(output_file_url)
assert has_no_console_errors(selenium)
# Zoom into plot so we can pan around a little
zoom_plot(selenium)
    # Now that the plot is zoomed in, try panning a little to the right
pan_plot(selenium, pan_x=-50, pan_y=0)
x_range_start = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.start)"""))
selenium.switch_to_alert().dismiss()
assert x_range_start > 0.5
# Now try panning far to left to check bounds
pan_plot(selenium, pan_x=100, pan_y=0)
x_range_start = float(selenium.execute_script("""alert(Bokeh.index['plot-id'].model.x_range.start)"""))
selenium.switch_to_alert().dismiss()
assert x_range_start > 0.4
assert x_range_start < 0.5
def test_autorange_prevents_panning_but_can_zoom_in_with_datarange1d(output_file_url, selenium):
plot = make_plot(xr=DataRange1d(bounds='auto'), yr=DataRange1d(bounds='auto'))
save(plot)
_assert_autorange_prevents_panning_but_can_zoom(output_file_url, selenium)
@pytest.mark.xfail
def test_autorange_prevents_panning_but_can_zoom_in_with_range1d(output_file_url, selenium):
plot = make_plot(xr=Range1d(0.45, 3, bounds='auto'), yr=DataRange1d(0, 3, bounds='auto'))
save(plot)
_assert_autorange_prevents_panning_but_can_zoom(output_file_url, selenium)
############################
# Test no bounds
############################
#def _assert_no_bounds_allows_unlimited_panning(output_file_url, selenium):
# selenium.get(output_file_url)
#
# pan_plot(selenium, pan_x=-1000, pan_y=2000)
#
# x_range_start = float(selenium.execute_script("""alert(window.get_x_range_start())"""))
# selenium.switch_to_alert().dismiss()
# assert x_range_start > 5
#
# y_range_start = float(selenium.execute_script("""alert(window.get_y_range_start())"""))
# selenium.switch_to_alert().dismiss()
# assert y_range_start > 5
#
#
#def test_no_bounds_allows_unlimited_panning_with_datarange1d(output_file_url, selenium):
# plot = make_plot_with_callback(xr=DataRange1d(bounds=None), yr=DataRange1d(bounds=None))
# save(plot)
# _assert_no_bounds_allows_unlimited_panning(output_file_url, selenium)
#
#
#def test_no_bounds_allows_unlimited_panning_with_range1d(output_file_url, selenium):
# plot = make_plot_with_callback(xr=Range1d(0.45, 3, bounds=None), yr=DataRange1d(0, 3, bounds=None))
# save(plot)
# _assert_no_bounds_allows_unlimited_panning(output_file_url, selenium)
|
pris54/reactjs-simple-form
|
refs/heads/master
|
node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_sln.py
|
1831
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each project in alphabetical order with its
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
  # If all dependencies are done we can build this project; otherwise we first
  # build the missing dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
  # Regular expressions that match the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
    # For each dependency in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
  # Check that we have at least one parameter (the solution file).
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
|
breznak/nupic
|
refs/heads/master
|
examples/opf/experiments/multistep/hotgym_best_sp_5step/description.py
|
32
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{ 'modelParams': { 'clParams': { 'clVerbosity': 0},
'inferenceType': 'NontemporalMultiStep',
'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'n': 28,
'name': u'consumption',
'type': 'AdaptiveScalarEncoder',
'w': 21},
'timestamp_dayOfWeek': { 'dayOfWeek': ( 21,
3),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': ( 21,
1),
'type': 'DateEncoder'},
'timestamp_weekend': None},
'verbosity': 0},
'spParams': { },
'tpParams': { 'activationThreshold': 13,
'minThreshold': 9,
'verbosity': 0}}}
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
|
gt-ros-pkg/hrl-haptic-manip
|
refs/heads/master
|
hrl_common_code_darpa_m3/src/hrl_common_code_darpa_m3/robot_config/multi_link_common.py
|
1
|
#!/usr/bin/env python
height = 0.0
total_length = 1.2
total_mass = 6.0  # kg; the average weight of a male human arm is about 11 lb (~5 kg), so this is slightly heavier than that
|
tyarkoni/featureX
|
refs/heads/master
|
pliers/tests/test_config.py
|
2
|
import tempfile
import os
import json
import pytest
import pliers
from pliers.config import reset_options
def test_load_from_standard_paths():
# Verify defaults
reset_options(update_from_file=False)
assert pliers.config._settings == pliers.config._default_settings
# Verify that PLIERS_CONFIG and local dir take precedence
env_config = {"n_jobs": 200, "log_transformations": False}
cwd_config = {"log_transformations": True, "parallelize": True}
handle, f = tempfile.mkstemp(suffix='.json')
json.dump(env_config, open(f, 'w'))
os.environ['PLIERS_CONFIG'] = f
target = 'pliers_config.json'
if os.path.exists(target):
pytest.skip("Cannot test pliers config because the default config file"
" (pliers_config.json) already exists in the current "
"working directory. Skipping test to avoid overwriting.")
json.dump(cwd_config, open(target, 'w'))
reset_options(True)
os.unlink(target)
opts = pliers.config._settings
assert opts['n_jobs'] == 200
assert opts['log_transformations']
assert opts['parallelize']
reset_options(False)
def test_set_option():
reset_options(False)
opts = pliers.config._settings
pliers.config.set_options(n_jobs=100, progress_bar=True)
assert opts['n_jobs'] == 100
assert opts['progress_bar']
with pytest.raises(ValueError):
pliers.config.set_option('bad_key', False)
reset_options(False)
def test_get_option():
reset_options(False)
assert not pliers.config.get_option('parallelize')
|
romain-dartigues/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/packet/packet_device.py
|
31
|
#!/usr/bin/python
# (c) 2016, Tomas Karasek <tom.to.the.k@gmail.com>
# (c) 2016, Matt Baldwin <baldwin@stackpointcloud.com>
# (c) 2016, Thibaud Morel l'Horset <teebes@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: packet_device
short_description: Manage a bare metal server in the Packet Host.
description:
- Manage a bare metal server in the Packet Host (a "device" in the API terms).
- When the machine is created it can optionally wait for public IP address, or for active state.
- This module has a dependency on packet >= 1.0.
- API is documented at U(https://www.packet.net/developers/api/devices).
version_added: "2.3"
author:
- Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
- Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
- Thibaud Morel l'Horset (@teebes) <teebes@gmail.com>
options:
auth_token:
description:
- Packet api token. You can also supply it in env var C(PACKET_API_TOKEN).
count:
description:
- The number of devices to create. Count number can be included in hostname via the %d string formatter.
default: 1
count_offset:
description:
- From which number to start the count.
default: 1
device_ids:
description:
- List of device IDs on which to operate.
facility:
description:
- Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/).
features:
description:
- Dict with "features" for device creation. See Packet API docs for details.
hostnames:
description:
- A hostname of a device, or a list of hostnames.
- If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count).
- If only one hostname, it might be expanded to list if I(count)>1.
aliases: [name]
locked:
description:
- Whether to lock a created device.
default: false
version_added: "2.4"
aliases: [lock]
type: bool
operating_system:
description:
- OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/).
plan:
description:
- Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/).
project_id:
description:
- ID of project of the device.
required: true
state:
description:
- Desired state of the device.
- If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
      - If set to C(active), the module call will block until all the specified devices are in state active according to the Packet API, or until I(wait_timeout).
choices: [present, absent, active, inactive, rebooted]
default: present
user_data:
description:
- Userdata blob made available to the machine
wait_for_public_IPv:
description:
- Whether to wait for the instance to be assigned a public IPv4/IPv6 address.
- If set to 4, it will wait until IPv4 is assigned to the instance.
- If set to 6, wait until public IPv6 is assigned to the instance.
choices: [4,6]
version_added: "2.4"
wait_timeout:
description:
- How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state).
      - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consecutively, applying the timeout twice.
default: 900
ipxe_script_url:
description:
- URL of custom iPXE script for provisioning.
- More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe).
version_added: "2.4"
always_pxe:
description:
- Persist PXE as the first boot option.
- Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE.
default: false
version_added: "2.4"
type: bool
requirements:
- "packet-python >= 1.35"
notes:
- Doesn't support check mode.
'''
EXAMPLES = '''
# All the examples assume that you have your Packet api token in env var PACKET_API_TOKEN.
# You can also pass it to the auth_token parameter of the module instead.
# Creating devices
- name: create 1 device
hosts: localhost
tasks:
- packet_device:
project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
hostnames: myserver
operating_system: ubuntu_16_04
plan: baremetal_0
facility: sjc1
# Create the same device and wait until it is in state "active" (when it's
# ready for other API operations). Fail if the device is not "active" in
# 10 minutes.
- name: create device and wait up to 10 minutes for active state
hosts: localhost
tasks:
- packet_device:
project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
hostnames: myserver
operating_system: ubuntu_16_04
plan: baremetal_0
facility: sjc1
state: active
wait_timeout: 600
- name: create 3 ubuntu devices called server-01, server-02 and server-03
hosts: localhost
tasks:
- packet_device:
project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
hostnames: server-%02d
count: 3
operating_system: ubuntu_16_04
plan: baremetal_0
facility: sjc1
- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH
hosts: localhost
tasks:
- name: create 3 devices and register their facts
packet_device:
hostnames: [coreos-one, coreos-two, coreos-three]
operating_system: coreos_stable
plan: baremetal_0
facility: ewr1
locked: true
project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
wait_for_public_IPv: 4
user_data: |
#cloud-config
ssh_authorized_keys:
- {{ lookup('file', 'my_packet_sshkey') }}
coreos:
etcd:
discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
addr: $private_ipv4:4001
peer-addr: $private_ipv4:7001
fleet:
public-ip: $private_ipv4
units:
- name: etcd.service
command: start
- name: fleet.service
command: start
register: newhosts
- name: wait for ssh
wait_for:
delay: 1
host: "{{ item.public_ipv4 }}"
port: 22
state: started
timeout: 500
with_items: "{{ newhosts.devices }}"
# Other states of devices
- name: remove 3 devices by uuid
hosts: localhost
tasks:
- packet_device:
project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
state: absent
device_ids:
- 1fb4faf8-a638-4ac7-8f47-86fe514c30d8
- 2eb4faf8-a638-4ac7-8f47-86fe514c3043
- 6bb4faf8-a638-4ac7-8f47-86fe514c301f
'''
RETURN = '''
changed:
description: True if a device was altered in any way (created, modified or removed)
type: bool
sample: True
returned: success
devices:
description: Information about each device that was processed
type: list
sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
"public_ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12",
"tags": [], "locked": false, "state": "provisioning",
"public_ipv6": ""2604:1380:2:5200::3"}]'
returned: success
''' # NOQA
import os
import re
import time
import uuid
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
HAS_PACKET_SDK = True
try:
import packet
except ImportError:
HAS_PACKET_SDK = False
NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
MAX_DEVICES = 100
PACKET_DEVICE_STATES = (
'queued',
'provisioning',
'failed',
'powering_on',
'active',
'powering_off',
'inactive',
'rebooting',
)
PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present']
def serialize_device(device):
"""
    Standard representation for a device as returned by various tasks::
{
'id': 'device_id'
'hostname': 'device_hostname',
'tags': [],
'locked': false,
'state': 'provisioning',
'ip_addresses': [
{
"address": "147.75.194.227",
"address_family": 4,
"public": true
},
{
"address": "2604:1380:2:5200::3",
"address_family": 6,
"public": true
},
{
"address": "10.100.11.129",
"address_family": 4,
"public": false
}
],
"private_ipv4": "10.100.11.129",
"public_ipv4": "147.75.194.227",
"public_ipv6": "2604:1380:2:5200::3",
}
"""
device_data = {}
device_data['id'] = device.id
device_data['hostname'] = device.hostname
device_data['tags'] = device.tags
device_data['locked'] = device.locked
device_data['state'] = device.state
device_data['ip_addresses'] = [
{
'address': addr_data['address'],
'address_family': addr_data['address_family'],
'public': addr_data['public'],
}
for addr_data in device.ip_addresses
]
    # Also include each IP as a key for easier lookup in roles.
# Key names:
# - public_ipv4
# - public_ipv6
# - private_ipv4
# - private_ipv6 (if there is one)
for ipdata in device_data['ip_addresses']:
if ipdata['public']:
if ipdata['address_family'] == 6:
device_data['public_ipv6'] = ipdata['address']
elif ipdata['address_family'] == 4:
device_data['public_ipv4'] = ipdata['address']
elif not ipdata['public']:
if ipdata['address_family'] == 6:
                # Packet doesn't assign private IPv6 yet, but maybe
                # one day it will
device_data['private_ipv6'] = ipdata['address']
elif ipdata['address_family'] == 4:
device_data['private_ipv4'] = ipdata['address']
return device_data
def is_valid_hostname(hostname):
return re.match(HOSTNAME_RE, hostname) is not None
def is_valid_uuid(myuuid):
try:
val = uuid.UUID(myuuid, version=4)
except ValueError:
return False
return str(val) == myuuid
def listify_string_name_or_id(s):
if ',' in s:
return s.split(',')
else:
return [s]
def get_hostname_list(module):
    # hostnames is a list-typed param, so it should already arrive as a list
    # (and it does, in Ansible 2.2.1), but to be defensive we still convert
    # an eventual plain string into a list here
hostnames = module.params.get('hostnames')
count = module.params.get('count')
count_offset = module.params.get('count_offset')
if isinstance(hostnames, str):
hostnames = listify_string_name_or_id(hostnames)
if not isinstance(hostnames, list):
raise Exception("name %s is not convertible to list" % hostnames)
# at this point, hostnames is a list
hostnames = [h.strip() for h in hostnames]
if (len(hostnames) > 1) and (count > 1):
_msg = ("If you set count>1, you should only specify one hostname "
"with the %d formatter, not a list of hostnames.")
raise Exception(_msg)
if (len(hostnames) == 1) and (count > 0):
hostname_spec = hostnames[0]
count_range = range(count_offset, count_offset + count)
if re.search(r"%\d{0,2}d", hostname_spec):
hostnames = [hostname_spec % i for i in count_range]
elif count > 1:
hostname_spec = '%s%%02d' % hostname_spec
hostnames = [hostname_spec % i for i in count_range]
for hn in hostnames:
if not is_valid_hostname(hn):
raise Exception("Hostname '%s' does not seem to be valid" % hn)
if len(hostnames) > MAX_DEVICES:
raise Exception("You specified too many hostnames, max is %d" %
MAX_DEVICES)
return hostnames
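# Editor's illustrative note (hedged): with the logic above, hostnames=['server-%02d'],
# count=3 and count_offset=1 would expand to ['server-01', 'server-02', 'server-03'];
# a single plain hostname combined with count>1 first gets a '%02d' suffix appended
# and is then expanded the same way.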
def get_device_id_list(module):
device_ids = module.params.get('device_ids')
if isinstance(device_ids, str):
device_ids = listify_string_name_or_id(device_ids)
device_ids = [di.strip() for di in device_ids]
for di in device_ids:
if not is_valid_uuid(di):
raise Exception("Device ID '%s' does not seem to be valid" % di)
if len(device_ids) > MAX_DEVICES:
raise Exception("You specified too many devices, max is %d" %
MAX_DEVICES)
return device_ids
def create_single_device(module, packet_conn, hostname):
for param in ('hostnames', 'operating_system', 'plan'):
if not module.params.get(param):
raise Exception("%s parameter is required for new device."
% param)
project_id = module.params.get('project_id')
plan = module.params.get('plan')
user_data = module.params.get('user_data')
facility = module.params.get('facility')
operating_system = module.params.get('operating_system')
locked = module.params.get('locked')
ipxe_script_url = module.params.get('ipxe_script_url')
always_pxe = module.params.get('always_pxe')
device = packet_conn.create_device(
project_id=project_id,
hostname=hostname,
plan=plan,
facility=facility,
operating_system=operating_system,
userdata=user_data,
locked=locked)
return device
def refresh_device_list(module, packet_conn, devices):
device_ids = [d.id for d in devices]
new_device_list = get_existing_devices(module, packet_conn)
return [d for d in new_device_list if d.id in device_ids]
def wait_for_devices_active(module, packet_conn, watched_devices):
wait_timeout = module.params.get('wait_timeout')
wait_timeout = time.time() + wait_timeout
refreshed = watched_devices
while wait_timeout > time.time():
refreshed = refresh_device_list(module, packet_conn, watched_devices)
if all(d.state == 'active' for d in refreshed):
return refreshed
time.sleep(5)
raise Exception("Waiting for state \"active\" timed out for devices: %s"
% [d.hostname for d in refreshed if d.state != "active"])
def wait_for_public_IPv(module, packet_conn, created_devices):
def has_public_ip(addr_list, ip_v):
return any([a['public'] and a['address_family'] == ip_v and
a['address'] for a in addr_list])
def all_have_public_ip(ds, ip_v):
return all([has_public_ip(d.ip_addresses, ip_v) for d in ds])
address_family = module.params.get('wait_for_public_IPv')
wait_timeout = module.params.get('wait_timeout')
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
refreshed = refresh_device_list(module, packet_conn, created_devices)
if all_have_public_ip(refreshed, address_family):
return refreshed
time.sleep(5)
raise Exception("Waiting for IPv%d address timed out. Hostnames: %s"
% (address_family, [d.hostname for d in created_devices]))
def get_existing_devices(module, packet_conn):
project_id = module.params.get('project_id')
return packet_conn.list_devices(
project_id, params={
'per_page': MAX_DEVICES})
def get_specified_device_identifiers(module):
if module.params.get('device_ids'):
device_id_list = get_device_id_list(module)
return {'ids': device_id_list, 'hostnames': []}
elif module.params.get('hostnames'):
hostname_list = get_hostname_list(module)
return {'hostnames': hostname_list, 'ids': []}
def act_on_devices(module, packet_conn, target_state):
specified_identifiers = get_specified_device_identifiers(module)
existing_devices = get_existing_devices(module, packet_conn)
changed = False
create_hostnames = []
if target_state in ['present', 'active', 'rebooted']:
# states where we might create non-existing specified devices
existing_devices_names = [ed.hostname for ed in existing_devices]
create_hostnames = [hn for hn in specified_identifiers['hostnames']
if hn not in existing_devices_names]
process_devices = [d for d in existing_devices
if (d.id in specified_identifiers['ids']) or
(d.hostname in specified_identifiers['hostnames'])]
if target_state != 'present':
_absent_state_map = {}
for s in PACKET_DEVICE_STATES:
_absent_state_map[s] = packet.Device.delete
state_map = {
'absent': _absent_state_map,
'active': {'inactive': packet.Device.power_on,
'provisioning': None, 'rebooting': None
},
'inactive': {'active': packet.Device.power_off},
'rebooted': {'active': packet.Device.reboot,
'inactive': packet.Device.power_on,
'provisioning': None, 'rebooting': None
},
}
# First do non-creation actions, it might be faster
for d in process_devices:
if d.state == target_state:
continue
if d.state in state_map[target_state]:
api_operation = state_map[target_state].get(d.state)
if api_operation is not None:
api_operation(d)
changed = True
else:
_msg = (
"I don't know how to process existing device %s from state %s "
"to state %s" %
(d.hostname, d.state, target_state))
raise Exception(_msg)
# At last create missing devices
created_devices = []
if create_hostnames:
created_devices = [create_single_device(module, packet_conn, n)
for n in create_hostnames]
if module.params.get('wait_for_public_IPv'):
created_devices = wait_for_public_IPv(
module, packet_conn, created_devices)
changed = True
processed_devices = created_devices + process_devices
if target_state == 'active':
processed_devices = wait_for_devices_active(
module, packet_conn, processed_devices)
return {
'changed': changed,
'devices': [serialize_device(d) for d in processed_devices]
}
def main():
module = AnsibleModule(
argument_spec=dict(
auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
no_log=True),
count=dict(type='int', default=1),
count_offset=dict(type='int', default=1),
device_ids=dict(type='list'),
facility=dict(),
features=dict(type='dict'),
hostnames=dict(type='list', aliases=['name']),
locked=dict(type='bool', default=False, aliases=['lock']),
operating_system=dict(),
plan=dict(),
project_id=dict(required=True),
state=dict(choices=ALLOWED_STATES, default='present'),
user_data=dict(default=None),
wait_for_public_IPv=dict(type='int', choices=[4, 6]),
wait_timeout=dict(type='int', default=900),
ipxe_script_url=dict(default=''),
always_pxe=dict(type='bool', default=False),
),
required_one_of=[('device_ids', 'hostnames',)],
mutually_exclusive=[
('always_pxe', 'operating_system'),
('ipxe_script_url', 'operating_system'),
('hostnames', 'device_ids'),
('count', 'device_ids'),
('count_offset', 'device_ids'),
]
)
if not HAS_PACKET_SDK:
module.fail_json(msg='packet required for this module')
if not module.params.get('auth_token'):
_fail_msg = ("if Packet API token is not in environment variable %s, "
"the auth_token parameter is required" %
PACKET_API_TOKEN_ENV_VAR)
module.fail_json(msg=_fail_msg)
auth_token = module.params.get('auth_token')
packet_conn = packet.Manager(auth_token=auth_token)
state = module.params.get('state')
try:
module.exit_json(**act_on_devices(module, packet_conn, state))
except Exception as e:
module.fail_json(msg='failed to set device state %s, error: %s' %
(state, to_native(e)), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
openstack-attic/melange
|
refs/heads/master
|
melange/openstack/common/config.py
|
1
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring OpenStack projects
"""
import logging
import logging.config
import logging.handlers
import optparse
import os
import sys
from paste import deploy
DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
def parse_options(parser, cli_args=None):
"""
Returns the parsed CLI options, command to run and its arguments, merged
with any same-named options found in a configuration file.
The function returns a tuple of (options, args), where options is a
mapping of option key/str(value) pairs, and args is the set of arguments
(not options) supplied on the command-line.
The reason that the option values are returned as strings only is that
ConfigParser and paste.deploy only accept string values...
:param parser: The option parser
:param cli_args: (Optional) Set of arguments to process. If not present,
sys.argv[1:] is used.
:retval tuple of (options, args)
"""
(options, args) = parser.parse_args(cli_args)
return (vars(options), args)
def add_common_options(parser):
"""
Given a supplied optparse.OptionParser, adds an OptionGroup that
represents all common configuration options.
:param parser: optparse.OptionParser
"""
help_text = ("The following configuration options are common to "
"this app's programs.")
group = optparse.OptionGroup(parser, "Common Options", help_text)
group.add_option('-v', '--verbose', default=False, dest="verbose",
action="store_true",
help="Print more verbose output")
group.add_option('-d', '--debug', default=False, dest="debug",
action="store_true",
help="Print debugging output")
group.add_option('--config-file', default=None, metavar="PATH",
help="Path to the config file to use. When not specified "
"(the default), we generally look at the first "
"argument specified to be a config file, and if "
"that is also missing, we search standard "
"directories for a config file.")
parser.add_option_group(group)
def add_log_options(parser):
"""
Given a supplied optparse.OptionParser, adds an OptionGroup that
represents all the configuration options around logging.
:param parser: optparse.OptionParser
"""
help_text = ("The following configuration options are specific to "
"logging functionality for this program.")
group = optparse.OptionGroup(parser, "Logging Options", help_text)
group.add_option('--log-config', default=None, metavar="PATH",
help="If this option is specified, the logging "
"configuration file specified is used and overrides "
"any other logging options specified. Please see "
"the Python logging module documentation for "
"details on logging configuration files.")
group.add_option('--log-date-format', metavar="FORMAT",
default=DEFAULT_LOG_DATE_FORMAT,
help="Format string for %(asctime)s in log records. "
"Default: %default")
group.add_option('--log-file', default=None, metavar="PATH",
help="(Optional) Name of log file to output to. "
"If not set, logging will go to stdout.")
group.add_option("--log-dir", default=None,
help="(Optional) The directory to keep log files in "
"(will be prepended to --logfile)")
group.add_option('--use-syslog', default=False, dest="use_syslog",
action="store_true",
help="Use syslog for logging.")
parser.add_option_group(group)
def setup_logging(options, conf):
"""
Sets up the logging options for a log with supplied name
:param options: Mapping of typed option key/values
:param conf: Mapping of untyped key/values from config file
"""
if options.get('log_config', None):
# Use a logging configuration file for all settings...
if os.path.exists(options['log_config']):
logging.config.fileConfig(options['log_config'])
return
else:
raise RuntimeError("Unable to locate specified logging "
"config file: %s" % options['log_config'])
# If either the CLI option or the conf value
# is True, we set to True
debug = (options.get('debug') or
get_option(conf, 'debug', type='bool', default=False))
verbose = (options.get('verbose') or
get_option(conf, 'verbose', type='bool', default=False))
root_logger = logging.root
if debug:
root_logger.setLevel(logging.DEBUG)
elif verbose:
root_logger.setLevel(logging.INFO)
else:
root_logger.setLevel(logging.WARNING)
# Set log configuration from options...
# Note that we use a hard-coded log format in the options
# because of Paste.Deploy bug #379
# http://trac.pythonpaste.org/pythonpaste/ticket/379
log_format = options.get('log_format', DEFAULT_LOG_FORMAT)
log_date_format = options.get('log_date_format', DEFAULT_LOG_DATE_FORMAT)
formatter = logging.Formatter(log_format, log_date_format)
logfile = options.get('log_file')
if not logfile:
logfile = conf.get('log_file')
use_syslog = (options.get('use_syslog') or
get_option(conf, 'use_syslog', type='bool', default=False))
if use_syslog:
handler = logging.handlers.SysLogHandler(address='/dev/log')
elif logfile:
logdir = options.get('log_dir')
if not logdir:
logdir = conf.get('log_dir')
if logdir:
logfile = os.path.join(logdir, logfile)
handler = logging.FileHandler(logfile)
else:
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
def fix_path(path):
"""
Return the full absolute path
"""
return os.path.abspath(os.path.expanduser(path))
def find_config_file(app_name, options, args, config_dir=None):
"""
Return the first config file found for an application.
We search for the paste config file in the following order:
* If --config-file option is used, use that
* If args[0] is a file, use that
    * Search for $app_name.conf in standard directories:
* .
        * ~/.config_dir/
* ~
* /etc/config_dir
* /etc
:retval Full path to config file, or None if no config file found
"""
config_dir = config_dir or app_name
if options.get('config_file'):
if os.path.exists(options['config_file']):
return fix_path(options['config_file'])
elif args:
if os.path.exists(args[0]):
return fix_path(args[0])
# Handle standard directory search for $app_name.conf
config_file_dirs = [fix_path(os.getcwd()),
fix_path(os.path.join('~', '.' + config_dir)),
fix_path('~'),
os.path.join('/etc', config_dir),
'/etc']
for cfg_dir in config_file_dirs:
cfg_file = os.path.join(cfg_dir, '%s.conf' % app_name)
if os.path.exists(cfg_file):
return cfg_file
def load_paste_config(app_name, options, args, config_dir=None):
"""
Looks for a config file to use for an app and returns the
config file path and a configuration mapping from a paste config file.
We search for the paste config file in the following order:
* If --config-file option is used, use that
* If args[0] is a file, use that
* Search for $app_name.conf in standard directories:
* .
        * ~/.config_dir/
* ~
* /etc/config_dir
* /etc
:param app_name: Name of the application to load config for, or None.
None signifies to only load the [DEFAULT] section of
the config file.
:param options: Set of typed options returned from parse_options()
:param args: Command line arguments from argv[1:]
:retval Tuple of (conf_file, conf)
:raises RuntimeError when config file cannot be located or there was a
problem loading the configuration file.
"""
conf_file = find_config_file(app_name, options, args, config_dir)
if not conf_file:
raise RuntimeError("Unable to locate any configuration file. "
"Cannot load application %s" % app_name)
try:
conf = deploy.appconfig("config:%s" % conf_file, name=app_name)
return conf_file, conf
except Exception, e:
raise RuntimeError("Error trying to load config %s: %s"
% (conf_file, e))
def load_paste_app(app_name, options, args, config_dir=None):
"""
Builds and returns a WSGI app from a paste config file.
We search for the paste config file in the following order:
* If --config-file option is used, use that
* If args[0] is a file, use that
* Search for $app_name.conf in standard directories:
* .
        * ~/.config_dir/
* ~
* /etc/config_dir
* /etc
:param app_name: Name of the application to load
:param options: Set of typed options returned from parse_options()
:param args: Command line arguments from argv[1:]
:raises RuntimeError when config file cannot be located or application
cannot be loaded from config file
"""
conf_file, conf = load_paste_config(app_name, options,
args, config_dir)
try:
# Setup logging early, supplying both the CLI options and the
# configuration mapping from the config file
setup_logging(options, conf)
# We only update the conf dict for the verbose and debug
# flags. Everything else must be set up in the conf file...
debug = (options.get('debug') or
get_option(conf, 'debug', type='bool', default=False))
verbose = (options.get('verbose') or
get_option(conf, 'verbose', type='bool', default=False))
conf['debug'] = debug
conf['verbose'] = verbose
# Log the options used when starting if we're in debug mode...
if debug:
logger = logging.getLogger(app_name)
logger.debug("*" * 80)
logger.debug("Configuration options gathered from config file:")
logger.debug(conf_file)
logger.debug("================================================")
items = dict([(k, v) for k, v in conf.items()
if k not in ('__file__', 'here')])
for key, value in sorted(items.items()):
logger.debug("%(key)-30s %(value)s" % locals())
logger.debug("*" * 80)
app = deploy.loadapp("config:%s" % conf_file, name=app_name)
except (LookupError, ImportError), e:
raise RuntimeError("Unable to load %(app_name)s from "
"configuration file %(conf_file)s."
"\nGot: %(e)r" % locals())
return conf, app
def get_option(options, option, **kwargs):
if option in options:
value = options[option]
type_ = kwargs.get('type', 'str')
if type_ == 'bool':
if hasattr(value, 'lower'):
return value.lower() == 'true'
else:
return value
elif type_ == 'int':
return int(value)
elif type_ == 'float':
return float(value)
else:
return value
elif 'default' in kwargs:
return kwargs['default']
else:
raise KeyError("option '%s' not found" % option)
|
NaturalGIS/naturalgis_qgis
|
refs/heads/master
|
python/plugins/processing/gui/MultipleInputDialog.py
|
45
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
MultipleInputDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
import warnings
from pathlib import Path
from qgis.core import (QgsSettings,
QgsProcessing,
QgsVectorFileWriter,
QgsProviderRegistry,
QgsProcessingModelChildParameterSource)
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt, QByteArray, QCoreApplication
from qgis.PyQt.QtWidgets import QDialog, QAbstractItemView, QPushButton, QDialogButtonBox, QFileDialog
from qgis.PyQt.QtGui import QStandardItemModel, QStandardItem
pluginPath = os.path.split(os.path.dirname(__file__))[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgMultipleSelection.ui'))
class MultipleInputDialog(BASE, WIDGET):
def __init__(self, options, selectedoptions=None, datatype=None):
super(MultipleInputDialog, self).__init__(None)
self.setupUi(self)
self.datatype = datatype
self.model = None
self.options = []
for i, option in enumerate(options):
if option is None or isinstance(option, str):
self.options.append((i, option))
else:
self.options.append((option[0], option[1]))
self.selectedoptions = selectedoptions or []
# Additional buttons
self.btnSelectAll = QPushButton(self.tr('Select All'))
self.buttonBox.addButton(self.btnSelectAll,
QDialogButtonBox.ActionRole)
self.btnClearSelection = QPushButton(self.tr('Clear Selection'))
self.buttonBox.addButton(self.btnClearSelection,
QDialogButtonBox.ActionRole)
self.btnToggleSelection = QPushButton(self.tr('Toggle Selection'))
self.buttonBox.addButton(self.btnToggleSelection,
QDialogButtonBox.ActionRole)
if self.datatype is not None:
btnAddFile = QPushButton(QCoreApplication.translate("MultipleInputDialog", 'Add File(s)…'))
btnAddFile.clicked.connect(self.addFiles)
self.buttonBox.addButton(btnAddFile,
QDialogButtonBox.ActionRole)
btnAddDir = QPushButton(QCoreApplication.translate("MultipleInputDialog", 'Add Directory…'))
btnAddDir.clicked.connect(self.addDirectory)
self.buttonBox.addButton(btnAddDir,
QDialogButtonBox.ActionRole)
self.btnSelectAll.clicked.connect(lambda: self.selectAll(True))
self.btnClearSelection.clicked.connect(lambda: self.selectAll(False))
self.btnToggleSelection.clicked.connect(self.toggleSelection)
self.settings = QgsSettings()
self.restoreGeometry(self.settings.value("/Processing/multipleInputDialogGeometry", QByteArray()))
self.lstLayers.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.lstLayers.setDragDropMode(QAbstractItemView.InternalMove)
self.populateList()
self.finished.connect(self.saveWindowGeometry)
def saveWindowGeometry(self):
self.settings.setValue("/Processing/multipleInputDialogGeometry", self.saveGeometry())
def populateList(self):
self.model = QStandardItemModel()
for value, text in self.options:
item = QStandardItem(text)
item.setData(value, Qt.UserRole)
item.setCheckState(Qt.Checked if value in self.selectedoptions else Qt.Unchecked)
item.setCheckable(True)
item.setDropEnabled(False)
self.model.appendRow(item)
# add extra options (e.g. manually added layers)
for t in [o for o in self.selectedoptions if not isinstance(o, int)]:
if isinstance(t, QgsProcessingModelChildParameterSource):
item = QStandardItem(t.staticValue())
else:
item = QStandardItem(t)
item.setData(item.text(), Qt.UserRole)
item.setCheckState(Qt.Checked)
item.setCheckable(True)
item.setDropEnabled(False)
self.model.appendRow(item)
self.lstLayers.setModel(self.model)
def accept(self):
self.selectedoptions = []
model = self.lstLayers.model()
for i in range(model.rowCount()):
item = model.item(i)
if item.checkState() == Qt.Checked:
self.selectedoptions.append(item.data(Qt.UserRole))
QDialog.accept(self)
def reject(self):
self.selectedoptions = None
QDialog.reject(self)
def getItemsToModify(self):
items = []
if len(self.lstLayers.selectedIndexes()) > 1:
for i in self.lstLayers.selectedIndexes():
items.append(self.model.itemFromIndex(i))
else:
for i in range(self.model.rowCount()):
items.append(self.model.item(i))
return items
def selectAll(self, value):
for item in self.getItemsToModify():
item.setCheckState(Qt.Checked if value else Qt.Unchecked)
def toggleSelection(self):
for item in self.getItemsToModify():
checked = item.checkState() == Qt.Checked
item.setCheckState(Qt.Unchecked if checked else Qt.Checked)
def getFileFilter(self, datatype):
"""
        Returns a suitable file filter pattern for the specified data type
        :param datatype: QgsProcessing data type
        :return: file filter string
"""
if datatype == QgsProcessing.TypeRaster:
return QgsProviderRegistry.instance().fileRasterFilters()
elif datatype == QgsProcessing.TypeFile:
return self.tr('All files (*.*)')
else:
exts = QgsVectorFileWriter.supportedFormatExtensions()
for i in range(len(exts)):
exts[i] = self.tr('{0} files (*.{1})').format(exts[i].upper(), exts[i].lower())
return self.tr('All files (*.*)') + ';;' + ';;'.join(exts)
def addFiles(self):
filter = self.getFileFilter(self.datatype)
settings = QgsSettings()
path = str(settings.value('/Processing/LastInputPath'))
ret, selected_filter = QFileDialog.getOpenFileNames(self, self.tr('Select File(s)'),
path, filter)
if ret:
files = list(ret)
settings.setValue('/Processing/LastInputPath',
os.path.dirname(str(files[0])))
for filename in files:
item = QStandardItem(filename)
item.setData(filename, Qt.UserRole)
item.setCheckState(Qt.Checked)
item.setCheckable(True)
item.setDropEnabled(False)
self.model.appendRow(item)
def addDirectory(self):
settings = QgsSettings()
path = str(settings.value('/Processing/LastInputPath'))
        ret = QFileDialog.getExistingDirectory(self, self.tr('Select Directory'), path)
if ret:
exts = []
if self.datatype == QgsProcessing.TypeVector:
exts = QgsVectorFileWriter.supportedFormatExtensions()
elif self.datatype == QgsProcessing.TypeRaster:
for t in QgsProviderRegistry.instance().fileRasterFilters().split(';;')[1:]:
for e in t[t.index('(') + 1:-1].split(' '):
if e != "*.*" and e.startswith("*."):
exts.append(e[2:])
files = []
for pp in Path(ret).rglob("*"):
if not pp.is_file():
continue
if exts and pp.suffix[1:] not in exts:
continue
p = pp.as_posix()
files.append(p)
settings.setValue('/Processing/LastInputPath', ret)
for filename in files:
item = QStandardItem(filename)
item.setData(filename, Qt.UserRole)
item.setCheckState(Qt.Checked)
item.setCheckable(True)
item.setDropEnabled(False)
self.model.appendRow(item)
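# --- Editor's illustrative usage sketch (hedged; needs a running Qt application,
# and the option values are hypothetical) ---
#   dlg = MultipleInputDialog([(0, 'Layer A'), (1, 'Layer B')], selectedoptions=[0])
#   if dlg.exec_():
#       print(dlg.selectedoptions)  # values of the items the user left checked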
|
hefen1/chromium
|
refs/heads/master
|
tools/deep_memory_profiler/lib/symbol.py
|
99
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
_BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
_FIND_RUNTIME_SYMBOLS_PATH = os.path.join(_BASE_PATH,
os.pardir,
'find_runtime_symbols')
_TOOLS_LINUX_PATH = os.path.join(_BASE_PATH,
os.pardir,
'linux')
sys.path.append(_FIND_RUNTIME_SYMBOLS_PATH)
sys.path.append(_TOOLS_LINUX_PATH)
import find_runtime_symbols
import prepare_symbol_info
import procfs # pylint: disable=W0611,F0401
LOGGER = logging.getLogger('dmprof')
FUNCTION_SYMBOLS = find_runtime_symbols.FUNCTION_SYMBOLS
SOURCEFILE_SYMBOLS = find_runtime_symbols.SOURCEFILE_SYMBOLS
TYPEINFO_SYMBOLS = find_runtime_symbols.TYPEINFO_SYMBOLS
class SymbolDataSources(object):
"""Manages symbol data sources in a process.
The symbol data sources consist of maps (/proc/<pid>/maps), nm, readelf and
so on. They are collected into a directory '|prefix|.symmap' from the binary
files by 'prepare()' with tools/find_runtime_symbols/prepare_symbol_info.py.
Binaries are not mandatory to profile. The prepared data sources work in
place of the binary even if the binary has been overwritten with another
binary.
Note that loading the symbol data sources takes a long time. They are often
very big. So, the 'dmprof' profiler is designed to use 'SymbolMappingCache'
which caches actually used symbols.
"""
def __init__(self, prefix, alternative_dirs=None):
self._prefix = prefix
self._prepared_symbol_data_sources_path = None
self._loaded_symbol_data_sources = None
self._alternative_dirs = alternative_dirs or {}
def prepare(self):
"""Prepares symbol data sources by extracting mapping from a binary.
The prepared symbol data sources are stored in a directory. The directory
name is stored in |self._prepared_symbol_data_sources_path|.
Returns:
True if succeeded.
"""
LOGGER.info('Preparing symbol mapping...')
self._prepared_symbol_data_sources_path, used_tempdir = (
prepare_symbol_info.prepare_symbol_info(
self._prefix + '.maps',
output_dir_path=self._prefix + '.symmap',
alternative_dirs=self._alternative_dirs,
use_tempdir=True,
use_source_file_name=True))
if self._prepared_symbol_data_sources_path:
LOGGER.info(' Prepared symbol mapping.')
if used_tempdir:
LOGGER.warn(' Using a temporary directory for symbol mapping.')
LOGGER.warn(' Delete it by yourself.')
LOGGER.warn(' Or, move the directory by yourself to use it later.')
return True
else:
LOGGER.warn(' Failed to prepare symbol mapping.')
return False
def get(self):
"""Returns the prepared symbol data sources.
Returns:
The prepared symbol data sources. None if failed.
"""
if not self._prepared_symbol_data_sources_path and not self.prepare():
return None
if not self._loaded_symbol_data_sources:
LOGGER.info('Loading symbol mapping...')
self._loaded_symbol_data_sources = (
find_runtime_symbols.RuntimeSymbolsInProcess.load(
self._prepared_symbol_data_sources_path))
return self._loaded_symbol_data_sources
def path(self):
"""Returns the path of the prepared symbol data sources if possible."""
if not self._prepared_symbol_data_sources_path and not self.prepare():
return None
return self._prepared_symbol_data_sources_path
class SymbolFinder(object):
"""Finds corresponding symbols from addresses.
  This class only looks up ('find()') symbols for a specified |address_list|.
  It is introduced to make the finder mockable.
"""
def __init__(self, symbol_type, symbol_data_sources):
self._symbol_type = symbol_type
self._symbol_data_sources = symbol_data_sources
def find(self, address_list):
return find_runtime_symbols.find_runtime_symbols(
self._symbol_type, self._symbol_data_sources.get(), address_list)
class SymbolMappingCache(object):
"""Caches mapping from actually used addresses to symbols.
'update()' updates the cache from the original symbol data sources via
'SymbolFinder'. Symbols can be looked up by the method 'lookup()'.
"""
def __init__(self):
self._symbol_mapping_caches = {
FUNCTION_SYMBOLS: {},
SOURCEFILE_SYMBOLS: {},
TYPEINFO_SYMBOLS: {},
}
def update(self, symbol_type, bucket_set, symbol_finder, cache_f):
"""Updates symbol mapping cache on memory and in a symbol cache file.
It reads cached symbol mapping from a symbol cache file |cache_f| if it
exists. Unresolved addresses are then resolved and added to the cache
both on memory and in the symbol cache file with using 'SymbolFinder'.
A cache file is formatted as follows:
<Address> <Symbol>
<Address> <Symbol>
<Address> <Symbol>
...
Args:
symbol_type: A type of symbols to update. It should be one of
FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS and TYPEINFO_SYMBOLS.
bucket_set: A BucketSet object.
symbol_finder: A SymbolFinder object to find symbols.
cache_f: A readable and writable IO object of the symbol cache file.
"""
cache_f.seek(0, os.SEEK_SET)
self._load(cache_f, symbol_type)
unresolved_addresses = sorted(
address for address in bucket_set.iter_addresses(symbol_type)
if address not in self._symbol_mapping_caches[symbol_type])
if not unresolved_addresses:
LOGGER.info('No need to resolve any more addresses.')
return
cache_f.seek(0, os.SEEK_END)
LOGGER.info('Loading %d unresolved addresses.' %
len(unresolved_addresses))
symbol_dict = symbol_finder.find(unresolved_addresses)
for address, symbol in symbol_dict.iteritems():
stripped_symbol = symbol.strip() or '?'
self._symbol_mapping_caches[symbol_type][address] = stripped_symbol
cache_f.write('%x %s\n' % (address, stripped_symbol))
def lookup(self, symbol_type, address):
"""Looks up a symbol for a given |address|.
Args:
symbol_type: A type of symbols to look up. It should be one of
FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS and TYPEINFO_SYMBOLS.
address: An integer that represents an address.
Returns:
A string that represents a symbol.
"""
return self._symbol_mapping_caches[symbol_type].get(address)
def _load(self, cache_f, symbol_type):
try:
for line in cache_f:
items = line.rstrip().split(None, 1)
if len(items) == 1:
items.append('??')
self._symbol_mapping_caches[symbol_type][int(items[0], 16)] = items[1]
LOGGER.info('Loaded %d entries from symbol cache.' %
len(self._symbol_mapping_caches[symbol_type]))
except IOError as e:
LOGGER.info('The symbol cache file is invalid: %s' % e)
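# Illustrative sketch, not part of the original tool: how the three classes
# above are typically wired together.  The dump prefix, cache-file suffix and
# address below are hypothetical placeholders.
def _example_symbol_lookup(prefix='/tmp/dmprof-dump', address=0x400000):
  """Resolves one address to a function symbol, caching the result on disk."""
  symbol_data_sources = SymbolDataSources(prefix)
  symbol_finder = SymbolFinder(FUNCTION_SYMBOLS, symbol_data_sources)
  symbol_mapping_cache = SymbolMappingCache()
  # SymbolMappingCache.update() only needs an object with iter_addresses();
  # a minimal stand-in keeps this sketch self-contained.
  class _OneAddressBucketSet(object):
    def iter_addresses(self, symbol_type):
      yield address
  with open(prefix + '.funcsym', 'a+') as cache_f:
    symbol_mapping_cache.update(
        FUNCTION_SYMBOLS, _OneAddressBucketSet(), symbol_finder, cache_f)
  return symbol_mapping_cache.lookup(FUNCTION_SYMBOLS, address)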
|
2e2a/l-rex
|
refs/heads/master
|
apps/item/migrations/0001_initial.py
|
1
|
# Generated by Django 2.2.14 on 2020-10-28 07:23
from django.db import migrations, models
import django.db.models.deletion
import markdownx.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('lrex_study', '0001_initial'),
('lrex_materials', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(max_length=230, unique=True)),
('number', models.IntegerField(help_text='Number of the item.')),
('condition', models.CharField(help_text='Condition of the item (character limit: 16).', max_length=16)),
('block', models.IntegerField(default=1, help_text='Number of the questionnaire block in which the item will appear.')),
('materials', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='items', to='lrex_materials.Materials')),
],
options={
'ordering': ['number', 'condition'],
},
),
migrations.CreateModel(
name='AudioLinkItem',
fields=[
('item_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='lrex_item.Item')),
('urls', models.TextField(help_text='Links to the audio files separated by commas (e.g., https://yourserver.org/item1a-i.ogg,https://yourserver.org/item1a-ii.ogg). ', max_length=5000, verbose_name='URLs')),
('description', markdownx.models.MarkdownxField(blank=True, help_text='Additional description presented with the audio item.', max_length=5000, null=True)),
],
bases=('lrex_item.item',),
),
migrations.CreateModel(
name='MarkdownItem',
fields=[
('item_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='lrex_item.Item')),
('text', markdownx.models.MarkdownxField(help_text='Content of the item with markdown formatting (character limit: 1024).', max_length=1024)),
],
bases=('lrex_item.item',),
),
migrations.CreateModel(
name='TextItem',
fields=[
('item_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='lrex_item.Item')),
('text', models.TextField(help_text='Content of the item (character limit: 1024).', max_length=1024)),
],
bases=('lrex_item.item',),
),
migrations.CreateModel(
name='ItemQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.IntegerField(default=0)),
('question', models.CharField(help_text='Individual question text for this item (e.g. "How acceptable is this sentence?").', max_length=1000)),
('scale_labels', models.CharField(blank=True, help_text='Individual rating scale labels for this item, separated by commas (e.g. "1,2,3,4,5"). If a label contains a comma itself, escape it with "\\" (e.g. "A,B,Can\'t decide\\, I like both"). Note that this will only overwrite the displayed labels, but the responses will be saved according to the general scale specified in the study settings.', max_length=500, null=True)),
('legend', models.CharField(blank=True, help_text='Individual legend for this item to clarify the scale (e.g. "1 = bad, 5 = good")', max_length=1000, null=True)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_questions', to='lrex_item.Item')),
],
options={
'ordering': ['item', 'number'],
},
),
migrations.CreateModel(
name='ItemList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.IntegerField(default=0)),
('items', models.ManyToManyField(to='lrex_item.Item')),
('materials', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lists', to='lrex_materials.Materials')),
],
options={
'ordering': ['materials', 'number'],
},
),
migrations.CreateModel(
name='ItemFeedback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('scale_values', models.CharField(help_text='Scale values, separated by commas (e.g. "1,3"). If a label contains a comma itself, escape it with "\\" (e.g. "A,B,Can\'t decide\\, I like both"). The feedback will be shown to the participant if one of these ratings is selected.', max_length=500)),
('feedback', models.TextField(help_text='Feedback shown to the participant for the selected rating scale values.', max_length=5000)),
('item', models.ForeignKey(help_text='Item for the feedback.', on_delete=django.db.models.deletion.CASCADE, related_name='item_feedback', to='lrex_item.Item')),
('question', models.ForeignKey(help_text='Question for the feedback.', on_delete=django.db.models.deletion.CASCADE, to='lrex_study.Question')),
],
options={
'ordering': ['item', 'question', 'pk'],
},
),
]
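# Illustrative sketch, not part of the generated migration: several help_text
# strings above describe comma-separated values in which a literal comma is
# escaped as "\,".  A parser for that convention could look like this (the
# helper name is hypothetical):
def _split_escaped_csv(value):
    """Splits a comma-separated string, honouring the "\\," escape."""
    parts = []
    current = ''
    escaped = False
    for char in value:
        if escaped:
            current += char
            escaped = False
        elif char == '\\':
            escaped = True
        elif char == ',':
            parts.append(current)
            current = ''
        else:
            current += char
    parts.append(current)
    return parts
# For example, _split_escaped_csv("A,B,Can't decide\, I like both") returns
# ['A', 'B', "Can't decide, I like both"].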
|
peichman-umd/newspaper-batchload
|
refs/heads/develop
|
classes/ore.py
|
2
|
from rdflib import Literal
from classes import ldp
from namespaces import dcterms, iana, ore, rdf
# alias the RDFlib Namespace
ns = ore
class Proxy(ldp.Resource):
def __init__(self, position, proxy_for, proxy_in):
super(Proxy, self).__init__()
self.title = 'Proxy for {0} in {1}'.format(position, proxy_in.title)
self.prev = None
self.next = None
self.proxy_for = proxy_for
self.proxy_in = proxy_in
def graph(self):
graph = super(Proxy, self).graph()
graph.add((self.uri, rdf.type, ore.Proxy))
graph.add((self.uri, dcterms.title, Literal(self.title)))
graph.add((self.uri, ore.proxyFor, self.proxy_for.uri))
graph.add((self.uri, ore.proxyIn, self.proxy_in.uri))
if self.prev is not None:
graph.add((self.uri, iana.prev, self.prev.uri))
if self.next is not None:
graph.add((self.uri, iana.next, self.next.uri))
return graph
# Create the proxy object by PUTting the object graph to a derived URI.
def create_object(self, repository, **kwargs):
uri='/'.join([p.strip('/') for p in (self.proxy_for.uri, self.proxy_in.uuid)])
super(Proxy, self).create_object(repository, uri=uri, **kwargs)
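# Illustrative sketch, not part of this module: the ORE proxy pattern that
# Proxy.graph() emits, built with plain rdflib.  All URIs below are
# hypothetical placeholders; in the real workflow they come from the LDP
# repository.
def _example_proxy_chain():
    from rdflib import Graph, Namespace, URIRef, Literal
    from rdflib.namespace import RDF, DCTERMS
    ORE = Namespace('http://www.openarchives.org/ore/terms/')
    IANA = Namespace('http://www.iana.org/assignments/relation/')
    issue = URIRef('http://example.org/issue/1')
    pages = [URIRef('http://example.org/page/%d' % n) for n in (1, 2)]
    proxies = [URIRef('http://example.org/proxy/%d' % n) for n in (1, 2)]
    graph = Graph()
    for position, proxy in enumerate(proxies):
        graph.add((proxy, RDF.type, ORE.Proxy))
        graph.add((proxy, DCTERMS.title,
                   Literal('Proxy for %d in issue 1' % (position + 1))))
        graph.add((proxy, ORE.proxyFor, pages[position]))
        graph.add((proxy, ORE.proxyIn, issue))
        if position > 0:
            graph.add((proxy, IANA.prev, proxies[position - 1]))
        if position < len(proxies) - 1:
            graph.add((proxy, IANA.next, proxies[position + 1]))
    return graph.serialize(format='turtle')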
|
tgoettel9401/Homepage-Schachclub-Niedermohr
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/generator/make.py
|
896
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
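# For reference, the difference that matters here is when make expands the
# right-hand side (a generic illustration, not taken from the generated
# output):
#   X = hello
#   Y := $(X)    # Y is "hello" from this point on (immediate evaluation).
#   X = world
#   Z = $(X)     # Z expands to "world" wherever it is used later (deferred).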
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Make generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
android_ndk_version = generator_flags.get('android_ndk_version', None)
# Android NDK requires a strict link order.
if android_ndk_version:
global generator_wants_sorted_dependencies
generator_wants_sorted_dependencies = True
output_dir = params['options'].generator_output or \
params['options'].toplevel_dir
builddir_name = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
output_dir, builddir_name, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': params['options'].toplevel_dir,
'qualified_out_dir': qualified_out_dir,
}
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@"
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flags by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write(
'$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
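# For example, with ext == '.cc' the loops above emit suffix rules such as:
#   $(obj).$(TOOLSET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD
#       @$(call do_cmd,cxx,1)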
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped, even when embedded in a string, or else Make
# will treat them as the start of a comment.
return s.replace('#', r'\#')
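# Illustrative examples (derived from the helpers above, not from the original
# file):
#   EscapeShellArgument("it's")             returns  'it'\''s'
#   EscapeMakeVariableExpansion('$(HOME)')  returns  $$(HOME)
#   EscapeCppDefine('NAME="a b"')           returns  'NAME="a b"'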
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above
functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
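# Hypothetical example: a 'static_library' target with sources ['a/util.cc',
# 'b/util.cc'] shares the basename 'util', so _ValidateSourcesForOSX() prints
# both paths and raises GypError.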
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and it is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput(spec)
self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = (
spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
if self.flavor == 'mac':
# libtool on OS X generates warnings for duplicate basenames in the same
# target.
_ValidateSourcesForOSX(spec, all_sources)
self.WriteSources(
configs, deps, all_sources, extra_outputs,
extra_link_deps, part_of_all,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
self.Pchify))
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
# Currently any version has the same effect, but in the future the behavior
# could differ.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs, actions,
command="%s_%d" % (name, count))
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
_, ext = os.path.splitext(output)
if ext != '.xcassets':
# Make does not support '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
# Info.plist files can contain envvars; write the environment so they can be
# substituted into the file.
self.WriteSortedXcodeEnv(
out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
pchdeps = precompiled_header.GetObjDependencies(compilable, objs )
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
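# A sketch of what happens (assuming toolset 'target'): the raw string below
# puts -Wl,-rpath=\$$ORIGIN/lib.target/ into the Makefile; make collapses $$
# to $, the shell strips the backslash, and the linker finally sees
# -Wl,-rpath=$ORIGIN/lib.target/ with $ORIGIN still literal.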
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
# the bundle is packaged, not merely after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
# nested subfolders, the framework directory will usually be older
# than its dependencies. To prevent this rule from executing
# on every build (expensive, especially with postbuilds), explicitly
# update the timestamp on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as supporting the V= make command-line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
command = command,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False, command=None):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' %
(' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in
# make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the
#   actual command.
# The intermediate's recipe will 'touch' the intermediate file, and the
# multi-output rule will have a do-nothing recipe.
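# A sketch for two hypothetical outputs gen/a.h and gen/a.cc generated from
# gen/a.in by a command labeled 'codegen' (with actions and force=True):
#   gen/a.h: TOOLSET := $(TOOLSET)
#   gen/a.h gen/a.cc: codegen.intermediate
#   	@:
#   .INTERMEDIATE: codegen.intermediate
#   codegen.intermediate: gen/a.in FORCE_DO_CMD
#   	$(call do_cmd,touch)
#   	<the original actions follow here>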
intermediate = "%s.intermediate" % (command if command else self.target)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
self.WriteLn('\t%s' % '@:')
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' %
(intermediate, ' '.join(inputs), force_append))
actions.insert(0, '$(call do_cmd,touch)')
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
# LOCAL_CFLAGS is applied to both C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out deps that do not match the prefix and suffix, and return
# the remaining names with the prefix and suffix stripped.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
filename = os.path.basename(filepath)
if filename.startswith(prefix) and filename.endswith(suffix):
modules.append(filename[len(prefix):-len(suffix)])
return modules
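# For example (a sketch with hypothetical inputs):
#   DepsToModules(['out/lib.target/libfoo.so', 'out/obj.target/bar.a'],
#                 'lib', '.so')
# returns ['foo']: only basenames matching the prefix/suffix survive, with
# both stripped off.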
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['SHARED_LIB_PREFIX'],
default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['STATIC_LIB_PREFIX'],
generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as a literal character.
# So don't escape spaces in |env[k]|.
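# For instance, env [('PRODUCT_NAME', 'My App')] comes out roughly as
# (target path is hypothetical; only the target itself is space-quoted):
#   out/Default/foo: export PRODUCT_NAME := My App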
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
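# E.g. (a sketch, toolset 'target'): Objectify('foo/bar.o') returns
# '$(obj).target/$(TARGET)/foo/bar.o', while paths already under $(obj)
# are only re-rooted into the per-toolset object directory.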
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
# XXX(TooTallNate): disabling this code since we don't want this behavior...
#if (self.type == 'shared_library' and
# (self.flavor != 'mac' or self.toolset != 'target')):
# # Install all shared libs into a common directory (per toolset) for
# # convenient access with LD_LIBRARY_PATH.
# return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write(
"quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
"cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
"%(makefile_name)s: %(deps)s\n"
"\t$(call do_cmd,regen_makefile)\n\n" % {
'makefile_name': makefile_name,
'deps': ' '.join(map(Sourceify, build_files)),
'cmd': gyp.common.EncodePOSIXShellList(
[gyp_binary, '-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)})
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
arguments = ['make']
if options.toplevel_dir and options.toplevel_dir != '.':
arguments += '-C', options.toplevel_dir
arguments.append('BUILDTYPE=' + config)
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
if options.generator_output:
output_file = os.path.join(
options.depth, options.generator_output, base_path, base_name)
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(
options.toplevel_dir, options.generator_output, makefile_name)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
flock_command = 'flock'
copy_archive_arguments = '-af'
header_params = {
'default_target': default_target,
'builddir': builddir_name,
'default_configuration': default_configuration,
'flock': flock_command,
'flock_index': 1,
'link_commands': LINK_COMMANDS_LINUX,
'extra_commands': '',
'srcdir': srcdir,
'copy_archive_args': copy_archive_arguments,
}
if flavor == 'mac':
flock_command = './gyp-mac-tool flock'
header_params.update({
'flock': flock_command,
'flock_index': 2,
'link_commands': LINK_COMMANDS_MAC,
'extra_commands': SHARED_HEADER_MAC_COMMANDS,
})
elif flavor == 'android':
header_params.update({
'link_commands': LINK_COMMANDS_ANDROID,
})
elif flavor == 'solaris':
header_params.update({
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
})
elif flavor == 'openbsd':
copy_archive_arguments = '-pPRf'
header_params.update({
'copy_archive_args': copy_archive_arguments,
})
elif flavor == 'aix':
copy_archive_arguments = '-pPRf'
header_params.update({
'copy_archive_args': copy_archive_arguments,
'link_commands': LINK_COMMANDS_AIX,
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
'CC.host': GetEnvironFallback(('CC_host', 'CC'), 'gcc'),
'AR.host': GetEnvironFallback(('AR_host', 'AR'), 'ar'),
'CXX.host': GetEnvironFallback(('CXX_host', 'CXX'), 'g++'),
'LINK.host': GetEnvironFallback(('LINK_host', 'LINK'), '$(CXX.host)'),
})
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_array = data[build_file].get('make_global_settings', [])
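# make_global_settings in a .gyp file looks roughly like this (a sketch;
# the tool paths and wrapper are hypothetical):
#   'make_global_settings': [
#     ['CC', '/usr/bin/clang'],
#     ['CC_wrapper', 'ccache'],
#   ],
# *_wrapper entries are split out below and prepended to the matching tool.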
wrappers = {}
for key, value in make_global_settings_array:
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
make_global_settings = ''
for key, value in make_global_settings_array:
if re.match('.*_wrapper', key):
continue
if value[0] != '$':
value = '$(abspath %s)' % value
wrapper = wrappers.get(key)
if wrapper:
value = '%s %s' % (wrapper, value)
del wrappers[key]
if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
make_global_settings += (
'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
# Let gyp-time envvars win over global settings.
env_key = key.replace('.', '_') # CC.host -> CC_host
if env_key in os.environ:
value = os.environ[env_key]
make_global_settings += ' %s = %s\n' % (key, value)
make_global_settings += 'endif\n'
else:
make_global_settings += '%s ?= %s\n' % (key, value)
# TODO(ukai): define cmd when only wrapper is specified in
# make_global_settings.
header_params['make_global_settings'] = make_global_settings
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER % header_params)
# Currently any version has the same effect, but in the future the behavior
# could differ.
if android_ndk_version:
root_makefile.write(
'# Define LOCAL_PATH for build of Android applications.\n'
'LOCAL_PATH := $(call my-dir)\n'
'\n')
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
WriteRootHeaderSuffixRules(root_makefile)
# Put build-time support tools next to the root Makefile.
dest_path = os.path.dirname(makefile_path)
gyp.common.CopyTool(flavor, dest_path)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings_array == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
writer = MakefileWriter(generator_flags, flavor)
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Write out per-gyp (sub-project) Makefiles.
depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
for build_file in build_files:
# The paths in build_files were relativized above, so undo that before
# testing against the non-relativized items in target_list and before
# calculating the Makefile path.
build_file = os.path.join(depth_rel_path, build_file)
gyp_targets = [target_dicts[target]['target_name'] for target in target_list
if target.startswith(build_file) and
target in needed_targets]
# Only generate Makefiles for gyp files with targets.
if not gyp_targets:
continue
base_path, output_file = CalculateMakefilePath(build_file,
os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
os.path.dirname(output_file))
writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
builddir_name)
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
# We wrap each .mk include in an if statement so users can tell make to
# not load a file by setting NO_LOAD. The make code below says: only
# load the .mk file if the .mk filename doesn't start with a token in
# NO_LOAD.
root_makefile.write(
"ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
" $(findstring $(join ^,$(prefix)),\\\n"
" $(join ^," + include_file + ")))),)\n")
root_makefile.write(" include " + include_file + "\n")
root_makefile.write("endif\n")
root_makefile.write('\n')
if (not generator_flags.get('standalone')
and generator_flags.get('auto_regeneration', True)):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
|
daoluan/decode-Django
|
refs/heads/master
|
Django-1.5.1/tests/modeltests/properties/tests.py
|
126
|
from __future__ import absolute_import
from django.test import TestCase
from .models import Person
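# The Person model (defined in ./models.py, not shown here) is assumed to
# expose a read-only ``full_name`` property plus a ``full_name_2`` property
# whose setter splits the value into first_name / last_name; roughly:
#
#     @property
#     def full_name(self):
#         return u'%s %s' % (self.first_name, self.last_name)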
class PropertyTests(TestCase):
def setUp(self):
self.a = Person(first_name='John', last_name='Lennon')
self.a.save()
def test_getter(self):
self.assertEqual(self.a.full_name, 'John Lennon')
def test_setter(self):
# The "full_name" property hasn't provided a "set" method.
self.assertRaises(AttributeError, setattr, self.a, 'full_name', 'Paul McCartney')
# But "full_name_2" has, and it can be used to initialise the class.
a2 = Person(full_name_2 = 'Paul McCartney')
a2.save()
self.assertEqual(a2.first_name, 'Paul')
|
vlachoudis/sl4a
|
refs/heads/master
|
python/gdata/src/gdata/tlslite/utils/OpenSSL_TripleDES.py
|
359
|
"""OpenSSL/M2Crypto 3DES implementation."""
from cryptomath import *
from TripleDES import *
if m2cryptoLoaded:
def new(key, mode, IV):
return OpenSSL_TripleDES(key, mode, IV)
class OpenSSL_TripleDES(TripleDES):
def __init__(self, key, mode, IV):
TripleDES.__init__(self, key, mode, IV, "openssl")
self.key = key
self.IV = IV
def _createContext(self, encrypt):
context = m2.cipher_ctx_new()
cipherType = m2.des_ede3_cbc()
m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
return context
def encrypt(self, plaintext):
TripleDES.encrypt(self, plaintext)
context = self._createContext(1)
ciphertext = m2.cipher_update(context, plaintext)
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return ciphertext
def decrypt(self, ciphertext):
TripleDES.decrypt(self, ciphertext)
context = self._createContext(0)
# I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
# To work around this, we append sixteen zeros to the string, below:
plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
# If this bug is ever fixed, then plaintext will end up having a garbage
# plaintext block on the end. That's okay - the below code will ignore it.
plaintext = plaintext[:len(ciphertext)]
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return plaintext
|
rimbalinux/LMD3
|
refs/heads/master
|
django/contrib/localflavor/pl/pl_administrativeunits.py
|
16
|
# -*- coding: utf-8 -*-
"""
Polish administrative units as in http://pl.wikipedia.org/wiki/Podzia%C5%82_administracyjny_Polski
"""
ADMINISTRATIVE_UNIT_CHOICES = (
('wroclaw', u'Wrocław'),
('jeleniagora', u'Jelenia Góra'),
('legnica', u'Legnica'),
('boleslawiecki', u'bolesławiecki'),
('dzierzoniowski', u'dzierżoniowski'),
('glogowski', u'głogowski'),
('gorowski', u'górowski'),
('jaworski', u'jaworski'),
('jeleniogorski', u'jeleniogórski'),
('kamiennogorski', u'kamiennogórski'),
('klodzki', u'kłodzki'),
('legnicki', u'legnicki'),
('lubanski', u'lubański'),
('lubinski', u'lubiński'),
('lwowecki', u'lwówecki'),
('milicki', u'milicki'),
('olesnicki', u'oleśnicki'),
('olawski', u'oławski'),
('polkowicki', u'polkowicki'),
('strzelinski', u'strzeliński'),
('sredzki', u'średzki'),
('swidnicki', u'świdnicki'),
('trzebnicki', u'trzebnicki'),
('walbrzyski', u'wałbrzyski'),
('wolowski', u'wołowski'),
('wroclawski', u'wrocławski'),
('zabkowicki', u'ząbkowicki'),
('zgorzelecki', u'zgorzelecki'),
('zlotoryjski', u'złotoryjski'),
('bydgoszcz', u'Bydgoszcz'),
('torun', u'Toruń'),
('wloclawek', u'Włocławek'),
('grudziadz', u'Grudziądz'),
('aleksandrowski', u'aleksandrowski'),
('brodnicki', u'brodnicki'),
('bydgoski', u'bydgoski'),
('chelminski', u'chełmiński'),
('golubsko-dobrzynski', u'golubsko-dobrzyński'),
('grudziadzki', u'grudziądzki'),
('inowroclawski', u'inowrocławski'),
('lipnowski', u'lipnowski'),
('mogilenski', u'mogileński'),
('nakielski', u'nakielski'),
('radziejowski', u'radziejowski'),
('rypinski', u'rypiński'),
('sepolenski', u'sępoleński'),
('swiecki', u'świecki'),
('torunski', u'toruński'),
('tucholski', u'tucholski'),
('wabrzeski', u'wąbrzeski'),
('wloclawski', u'wrocławski'),
('zninski', u'źniński'),
('lublin', u'Lublin'),
('biala-podlaska', u'Biała Podlaska'),
('chelm', u'Chełm'),
('zamosc', u'Zamość'),
('bialski', u'bialski'),
('bilgorajski', u'biłgorajski'),
('chelmski', u'chełmski'),
('hrubieszowski', u'hrubieszowski'),
('janowski', u'janowski'),
('krasnostawski', u'krasnostawski'),
('krasnicki', u'kraśnicki'),
('lubartowski', u'lubartowski'),
('lubelski', u'lubelski'),
('leczynski', u'łęczyński'),
('lukowski', u'łukowski'),
('opolski', u'opolski'),
('parczewski', u'parczewski'),
('pulawski', u'puławski'),
('radzynski', u'radzyński'),
('rycki', u'rycki'),
('swidnicki', u'świdnicki'),
('tomaszowski', u'tomaszowski'),
('wlodawski', u'włodawski'),
('zamojski', u'zamojski'),
('gorzow-wielkopolski', u'Gorzów Wielkopolski'),
('zielona-gora', u'Zielona Góra'),
('gorzowski', u'gorzowski'),
('krosnienski', u'krośnieński'),
('miedzyrzecki', u'międzyrzecki'),
('nowosolski', u'nowosolski'),
('slubicki', u'słubicki'),
('strzelecko-drezdenecki', u'strzelecko-drezdenecki'),
('sulecinski', u'suleńciński'),
('swiebodzinski', u'świebodziński'),
('wschowski', u'wschowski'),
('zielonogorski', u'zielonogórski'),
('zaganski', u'żagański'),
('zarski', u'żarski'),
('lodz', u'Łódź'),
('piotrkow-trybunalski', u'Piotrków Trybunalski'),
('skierniewice', u'Skierniewice'),
('belchatowski', u'bełchatowski'),
('brzezinski', u'brzeziński'),
('kutnowski', u'kutnowski'),
('laski', u'łaski'),
('leczycki', u'łęczycki'),
('lowicki', u'łowicki'),
('lodzki wschodni', u'łódzki wschodni'),
('opoczynski', u'opoczyński'),
('pabianicki', u'pabianicki'),
('pajeczanski', u'pajęczański'),
('piotrkowski', u'piotrkowski'),
('poddebicki', u'poddębicki'),
('radomszczanski', u'radomszczański'),
('rawski', u'rawski'),
('sieradzki', u'sieradzki'),
('skierniewicki', u'skierniewicki'),
('tomaszowski', u'tomaszowski'),
('wielunski', u'wieluński'),
('wieruszowski', u'wieruszowski'),
('zdunskowolski', u'zduńskowolski'),
('zgierski', u'zgierski'),
('krakow', u'Kraków'),
('tarnow', u'Tarnów'),
('nowy-sacz', u'Nowy Sącz'),
('bochenski', u'bocheński'),
('brzeski', u'brzeski'),
('chrzanowski', u'chrzanowski'),
('dabrowski', u'dąbrowski'),
('gorlicki', u'gorlicki'),
('krakowski', u'krakowski'),
('limanowski', u'limanowski'),
('miechowski', u'miechowski'),
('myslenicki', u'myślenicki'),
('nowosadecki', u'nowosądecki'),
('nowotarski', u'nowotarski'),
('olkuski', u'olkuski'),
('oswiecimski', u'oświęcimski'),
('proszowicki', u'proszowicki'),
('suski', u'suski'),
('tarnowski', u'tarnowski'),
('tatrzanski', u'tatrzański'),
('wadowicki', u'wadowicki'),
('wielicki', u'wielicki'),
('warszawa', u'Warszawa'),
('ostroleka', u'Ostrołęka'),
('plock', u'Płock'),
('radom', u'Radom'),
('siedlce', u'Siedlce'),
('bialobrzeski', u'białobrzeski'),
('ciechanowski', u'ciechanowski'),
('garwolinski', u'garwoliński'),
('gostyninski', u'gostyniński'),
('grodziski', u'grodziski'),
('grojecki', u'grójecki'),
('kozienicki', u'kozenicki'),
('legionowski', u'legionowski'),
('lipski', u'lipski'),
('losicki', u'łosicki'),
('makowski', u'makowski'),
('minski', u'miński'),
('mlawski', u'mławski'),
('nowodworski', u'nowodworski'),
('ostrolecki', u'ostrołęcki'),
('ostrowski', u'ostrowski'),
('otwocki', u'otwocki'),
('piaseczynski', u'piaseczyński'),
('plocki', u'płocki'),
('plonski', u'płoński'),
('pruszkowski', u'pruszkowski'),
('przasnyski', u'przasnyski'),
('przysuski', u'przysuski'),
('pultuski', u'pułtuski'),
('radomski', u'radomski'),
('siedlecki', u'siedlecki'),
('sierpecki', u'sierpecki'),
('sochaczewski', u'sochaczewski'),
('sokolowski', u'sokołowski'),
('szydlowiecki', u'szydłowiecki'),
('warszawski-zachodni', u'warszawski zachodni'),
('wegrowski', u'węgrowski'),
('wolominski', u'wołomiński'),
('wyszkowski', u'wyszkowski'),
('zwolenski', u'zwoleński'),
('zurominski', u'żuromiński'),
('zyrardowski', u'żyrardowski'),
('opole', u'Opole'),
('brzeski', u'brzeski'),
('glubczycki', u'głubczyski'),
('kedzierzynsko-kozielski', u'kędzierzyński-kozielski'),
('kluczborski', u'kluczborski'),
('krapkowicki', u'krapkowicki'),
('namyslowski', u'namysłowski'),
('nyski', u'nyski'),
('oleski', u'oleski'),
('opolski', u'opolski'),
('prudnicki', u'prudnicki'),
('strzelecki', u'strzelecki'),
('rzeszow', u'Rzeszów'),
('krosno', u'Krosno'),
('przemysl', u'Przemyśl'),
('tarnobrzeg', u'Tarnobrzeg'),
('bieszczadzki', u'bieszczadzki'),
('brzozowski', u'brzozowski'),
('debicki', u'dębicki'),
('jaroslawski', u'jarosławski'),
('jasielski', u'jasielski'),
('kolbuszowski', u'kolbuszowski'),
('krosnienski', u'krośnieński'),
('leski', u'leski'),
('lezajski', u'leżajski'),
('lubaczowski', u'lubaczowski'),
('lancucki', u'łańcucki'),
('mielecki', u'mielecki'),
('nizanski', u'niżański'),
('przemyski', u'przemyski'),
('przeworski', u'przeworski'),
('ropczycko-sedziszowski', u'ropczycko-sędziszowski'),
('rzeszowski', u'rzeszowski'),
('sanocki', u'sanocki'),
('stalowowolski', u'stalowowolski'),
('strzyzowski', u'strzyżowski'),
('tarnobrzeski', u'tarnobrzeski'),
('bialystok', u'Białystok'),
('lomza', u'Łomża'),
('suwalki', u'Suwałki'),
('augustowski', u'augustowski'),
('bialostocki', u'białostocki'),
('bielski', u'bielski'),
('grajewski', u'grajewski'),
('hajnowski', u'hajnowski'),
('kolnenski', u'kolneński'),
('łomzynski', u'łomżyński'),
('moniecki', u'moniecki'),
('sejnenski', u'sejneński'),
('siemiatycki', u'siematycki'),
('sokolski', u'sokólski'),
('suwalski', u'suwalski'),
('wysokomazowiecki', u'wysokomazowiecki'),
('zambrowski', u'zambrowski'),
('gdansk', u'Gdańsk'),
('gdynia', u'Gdynia'),
('slupsk', u'Słupsk'),
('sopot', u'Sopot'),
('bytowski', u'bytowski'),
('chojnicki', u'chojnicki'),
('czluchowski', u'człuchowski'),
('kartuski', u'kartuski'),
('koscierski', u'kościerski'),
('kwidzynski', u'kwidzyński'),
('leborski', u'lęborski'),
('malborski', u'malborski'),
('nowodworski', u'nowodworski'),
('gdanski', u'gdański'),
('pucki', u'pucki'),
('slupski', u'słupski'),
('starogardzki', u'starogardzki'),
('sztumski', u'sztumski'),
('tczewski', u'tczewski'),
('wejherowski', u'wejcherowski'),
('katowice', u'Katowice'),
('bielsko-biala', u'Bielsko-Biała'),
('bytom', u'Bytom'),
('chorzow', u'Chorzów'),
('czestochowa', u'Częstochowa'),
('dabrowa-gornicza', u'Dąbrowa Górnicza'),
('gliwice', u'Gliwice'),
('jastrzebie-zdroj', u'Jastrzębie Zdrój'),
('jaworzno', u'Jaworzno'),
('myslowice', u'Mysłowice'),
('piekary-slaskie', u'Piekary Śląskie'),
('ruda-slaska', u'Ruda Śląska'),
('rybnik', u'Rybnik'),
('siemianowice-slaskie', u'Siemianowice Śląskie'),
('sosnowiec', u'Sosnowiec'),
('swietochlowice', u'Świętochłowice'),
('tychy', u'Tychy'),
('zabrze', u'Zabrze'),
('zory', u'Żory'),
('bedzinski', u'będziński'),
('bielski', u'bielski'),
('bierunsko-ledzinski', u'bieruńsko-lędziński'),
('cieszynski', u'cieszyński'),
('czestochowski', u'częstochowski'),
('gliwicki', u'gliwicki'),
('klobucki', u'kłobucki'),
('lubliniecki', u'lubliniecki'),
('mikolowski', u'mikołowski'),
('myszkowski', u'myszkowski'),
('pszczynski', u'pszczyński'),
('raciborski', u'raciborski'),
('rybnicki', u'rybnicki'),
('tarnogorski', u'tarnogórski'),
('wodzislawski', u'wodzisławski'),
('zawiercianski', u'zawierciański'),
('zywiecki', u'żywiecki'),
('kielce', u'Kielce'),
('buski', u'buski'),
('jedrzejowski', u'jędrzejowski'),
('kazimierski', u'kazimierski'),
('kielecki', u'kielecki'),
('konecki', u'konecki'),
('opatowski', u'opatowski'),
('ostrowiecki', u'ostrowiecki'),
('pinczowski', u'pińczowski'),
('sandomierski', u'sandomierski'),
('skarzyski', u'skarżyski'),
('starachowicki', u'starachowicki'),
('staszowski', u'staszowski'),
('wloszczowski', u'włoszczowski'),
('olsztyn', u'Olsztyn'),
('elblag', u'Elbląg'),
('bartoszycki', u'bartoszycki'),
('braniewski', u'braniewski'),
('dzialdowski', u'działdowski'),
('elblaski', u'elbląski'),
('elcki', u'ełcki'),
('gizycki', u'giżycki'),
('goldapski', u'gołdapski'),
('ilawski', u'iławski'),
('ketrzynski', u'kętrzyński'),
('lidzbarski', u'lidzbarski'),
('mragowski', u'mrągowski'),
('nidzicki', u'nidzicki'),
('nowomiejski', u'nowomiejski'),
('olecki', u'olecki'),
('olsztynski', u'olsztyński'),
('ostrodzki', u'ostródzki'),
('piski', u'piski'),
('szczycienski', u'szczycieński'),
('wegorzewski', u'węgorzewski'),
('poznan', u'Poznań'),
('kalisz', u'Kalisz'),
('konin', u'Konin'),
('leszno', u'Leszno'),
('chodzieski', u'chodziejski'),
('czarnkowsko-trzcianecki', u'czarnkowsko-trzcianecki'),
('gnieznienski', u'gnieźnieński'),
('gostynski', u'gostyński'),
('grodziski', u'grodziski'),
('jarocinski', u'jarociński'),
('kaliski', u'kaliski'),
('kepinski', u'kępiński'),
('kolski', u'kolski'),
('koninski', u'koniński'),
('koscianski', u'kościański'),
('krotoszynski', u'krotoszyński'),
('leszczynski', u'leszczyński'),
('miedzychodzki', u'międzychodzki'),
('nowotomyski', u'nowotomyski'),
('obornicki', u'obornicki'),
('ostrowski', u'ostrowski'),
('ostrzeszowski', u'ostrzeszowski'),
('pilski', u'pilski'),
('pleszewski', u'pleszewski'),
('poznanski', u'poznański'),
('rawicki', u'rawicki'),
('slupecki', u'słupecki'),
('szamotulski', u'szamotulski'),
('sredzki', u'średzki'),
('sremski', u'śremski'),
('turecki', u'turecki'),
('wagrowiecki', u'wągrowiecki'),
('wolsztynski', u'wolsztyński'),
('wrzesinski', u'wrzesiński'),
('zlotowski', u'złotowski'),
('bialogardzki', u'białogardzki'),
('choszczenski', u'choszczeński'),
('drawski', u'drawski'),
('goleniowski', u'goleniowski'),
('gryficki', u'gryficki'),
('gryfinski', u'gryfiński'),
('kamienski', u'kamieński'),
('kolobrzeski', u'kołobrzeski'),
('koszalinski', u'koszaliński'),
('lobeski', u'łobeski'),
('mysliborski', u'myśliborski'),
('policki', u'policki'),
('pyrzycki', u'pyrzycki'),
('slawienski', u'sławieński'),
('stargardzki', u'stargardzki'),
('szczecinecki', u'szczecinecki'),
('swidwinski', u'świdwiński'),
('walecki', u'wałecki'),
)
|
techtonik/pip
|
refs/heads/master
|
src/pip/_vendor/urllib3/contrib/socks.py
|
65
|
# -*- coding: utf-8 -*-
"""
This module contains provisional support for SOCKS proxies from within
urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
SOCKS5. To enable its functionality, either install PySocks or install this
module with the ``socks`` extra.
The SOCKS implementation supports the full range of urllib3 features. It also
supports the following SOCKS features:
- SOCKS4
- SOCKS4a
- SOCKS5
- Usernames and passwords for the SOCKS proxy
Known Limitations:
- Currently PySocks does not support contacting remote websites via literal
IPv6 addresses. Any such connection attempt will fail. You must use a domain
name.
- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
such connection attempt will fail.
"""
from __future__ import absolute_import
try:
import socks
except ImportError:
import warnings
from ..exceptions import DependencyWarning
warnings.warn((
'SOCKS support in urllib3 requires the installation of optional '
'dependencies: specifically, PySocks. For more information, see '
'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
),
DependencyWarning
)
raise
from socket import error as SocketError, timeout as SocketTimeout
from ..connection import (
HTTPConnection, HTTPSConnection
)
from ..connectionpool import (
HTTPConnectionPool, HTTPSConnectionPool
)
from ..exceptions import ConnectTimeoutError, NewConnectionError
from ..poolmanager import PoolManager
from ..util.url import parse_url
try:
import ssl
except ImportError:
ssl = None
class SOCKSConnection(HTTPConnection):
"""
A plain-text HTTP connection that connects via a SOCKS proxy.
"""
def __init__(self, *args, **kwargs):
self._socks_options = kwargs.pop('_socks_options')
super(SOCKSConnection, self).__init__(*args, **kwargs)
def _new_conn(self):
"""
Establish a new connection via the SOCKS proxy.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = socks.create_connection(
(self.host, self.port),
proxy_type=self._socks_options['socks_version'],
proxy_addr=self._socks_options['proxy_host'],
proxy_port=self._socks_options['proxy_port'],
proxy_username=self._socks_options['username'],
proxy_password=self._socks_options['password'],
proxy_rdns=self._socks_options['rdns'],
timeout=self.timeout,
**extra_kw
)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except socks.ProxyError as e:
# This is fragile as hell, but it seems to be the only way to raise
# useful errors here.
if e.socket_err:
error = e.socket_err
if isinstance(error, SocketTimeout):
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout)
)
else:
raise NewConnectionError(
self,
"Failed to establish a new connection: %s" % error
)
else:
raise NewConnectionError(
self,
"Failed to establish a new connection: %s" % e
)
except SocketError as e: # Defensive: PySocks should catch all these.
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn
# We don't need to duplicate the Verified/Unverified distinction from
# urllib3/connection.py here because the HTTPSConnection will already have been
# correctly set to either the Verified or Unverified form by that module. This
# means the SOCKSHTTPSConnection will automatically be the correct type.
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
pass
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
ConnectionCls = SOCKSConnection
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
ConnectionCls = SOCKSHTTPSConnection
class SOCKSProxyManager(PoolManager):
"""
A version of the urllib3 ProxyManager that routes connections via the
defined SOCKS proxy.
"""
pool_classes_by_scheme = {
'http': SOCKSHTTPConnectionPool,
'https': SOCKSHTTPSConnectionPool,
}
def __init__(self, proxy_url, username=None, password=None,
num_pools=10, headers=None, **connection_pool_kw):
parsed = parse_url(proxy_url)
if username is None and password is None and parsed.auth is not None:
split = parsed.auth.split(':')
if len(split) == 2:
username, password = split
if parsed.scheme == 'socks5':
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = False
elif parsed.scheme == 'socks5h':
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = True
elif parsed.scheme == 'socks4':
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = False
elif parsed.scheme == 'socks4a':
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = True
else:
raise ValueError(
"Unable to determine SOCKS version from %s" % proxy_url
)
self.proxy_url = proxy_url
socks_options = {
'socks_version': socks_version,
'proxy_host': parsed.host,
'proxy_port': parsed.port,
'username': username,
'password': password,
'rdns': rdns
}
connection_pool_kw['_socks_options'] = socks_options
super(SOCKSProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw
)
self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
|
ajaniv/django-core-models
|
refs/heads/master
|
django_core_models/core/apps.py
|
1
|
"""
.. module:: django_core_models.core.apps
:synopsis: django_core_models core application configuration module.
django_core_models core application configuration module.
"""
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class CoreModelsConfig(AppConfig):
"""Django's core application configuration class."""
name = __package__
verbose_name = _("Core Models")
|
mcarton/thefuck
|
refs/heads/master
|
thefuck/rules/django_south_merge.py
|
17
|
def match(command):
return 'manage.py' in command.script and \
'migrate' in command.script \
and '--merge: will just attempt the migration' in command.stderr
def get_new_command(command):
return u'{} --merge'.format(command.script)
|
Samuel789/MediPi
|
refs/heads/master
|
MedManagementWeb/env/lib/python3.5/site-packages/django/contrib/sessions/management/commands/clearsessions.py
|
729
|
from importlib import import_module
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = (
"Can be run as a cronjob or directly to clean out expired sessions "
"(only with the database backend at the moment)."
)
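# Typical invocations (a sketch; the path and schedule are hypothetical):
#   python manage.py clearsessions
#   # or from cron, e.g. daily at 04:00:
#   # 0 4 * * * /srv/project/manage.py clearsessions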
def handle(self, **options):
engine = import_module(settings.SESSION_ENGINE)
try:
engine.SessionStore.clear_expired()
except NotImplementedError:
self.stderr.write("Session engine '%s' doesn't support clearing "
"expired sessions.\n" % settings.SESSION_ENGINE)
|
programulya/three.js
|
refs/heads/master
|
utils/converters/obj/convert_obj_three.py
|
160
|
"""Convert Wavefront OBJ / MTL files into Three.js (JSON model version, to be used with ascii / binary loader)
-------------------------
How to use this converter
-------------------------
python convert_obj_three.py -i infile.obj -o outfile.js [-m "morphfiles*.obj"] [-c "morphcolors*.obj"] [-a center|centerxz|top|bottom|none] [-s smooth|flat] [-t ascii|binary] [-d invert|normal] [-b] [-e]
Notes:
- flags
-i infile.obj input OBJ file
-o outfile.js output JS file
-m "morphfiles*.obj" morph OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-c "morphcolors*.obj" morph colors OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-a center|centerxz|top|bottom|none model alignment
-s smooth|flat smooth = export vertex normals, flat = no normals (face normals computed in loader)
-t ascii|binary export ascii or binary format (ascii has more features, binary just supports vertices, faces, normals, uvs and materials)
-d invert|normal invert transparency
-b bake material colors into face colors
-x 10.0 scale and truncate
-f 2 morph frame sampling step
- by default:
use smooth shading (if there were vertex normals in the original model)
will be in ASCII format
original model is assumed to use non-inverted transparency / dissolve (0.0 fully transparent, 1.0 fully opaque)
no face colors baking
no scale and truncate
morph frame step = 1 (all files will be processed)
- binary conversion will create two files:
outfile.js (materials)
outfile.bin (binary buffers)
--------------------------------------------------
How to use generated JS file in your HTML document
--------------------------------------------------
<script type="text/javascript" src="Three.js"></script>
...
<script type="text/javascript">
...
// load ascii model
var jsonLoader = new THREE.JSONLoader();
jsonLoader.load( "Model_ascii.js", createScene );
// load binary model
var binLoader = new THREE.BinaryLoader();
binLoader.load( "Model_bin.js", createScene );
function createScene( geometry, materials ) {
var mesh = new THREE.Mesh( geometry, new THREE.MeshFaceMaterial( materials ) );
}
...
</script>
-------------------------------------
Parsers based on formats descriptions
-------------------------------------
http://en.wikipedia.org/wiki/Obj
http://en.wikipedia.org/wiki/Material_Template_Library
-------------------
Current limitations
-------------------
- for the moment, only diffuse color and texture are used
(will need to extend shaders / renderers / materials in Three)
- texture coordinates can be wrong in canvas renderer
(there is crude normalization, but it doesn't
work for all cases)
- smoothing can be turned on/off only for the whole mesh
----------------------------------------------
How to get proper OBJ + MTL files with Blender
----------------------------------------------
0. Remove default cube (press DEL and ENTER)
1. Import / create model
2. Select all meshes (Select -> Select All by Type -> Mesh)
3. Export to OBJ (File -> Export -> Wavefront .obj)
- enable following options in exporter
Material Groups
Rotate X90
Apply Modifiers
High Quality Normals
Copy Images
Selection Only
Objects as OBJ Objects
UVs
Normals
Materials
- select empty folder
- give your exported file name with "obj" extension
- click on "Export OBJ" button
4. Your model now consists of all the files in this folder (OBJ, MTL, a number of images)
- this converter assumes all files stay in the same folder
(OBJ / MTL files use relative paths)
- for WebGL, textures must be power of 2 sized
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
ALIGN = "none" # center centerxz bottom top none
SHADING = "smooth" # smooth flat
TYPE = "ascii" # ascii binary
TRANSPARENCY = "normal" # normal invert
TRUNCATE = False
SCALE = 1.0
FRAMESTEP = 1
BAKE_COLORS = False
# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# #####################################################
# Templates
# #####################################################
TEMPLATE_FILE_ASCII = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"scale" : %(scale)f,
"materials": [%(materials)s],
"vertices": [%(vertices)s],
"morphTargets": [%(morphTargets)s],
"morphColors": [%(morphColors)s],
"normals": [%(normals)s],
"colors": [%(colors)s],
"uvs": [[%(uvs)s]],
"faces": [%(faces)s]
}
"""
TEMPLATE_FILE_BIN = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"materials": [%(materials)s],
"buffers": "%(buffers)s"
}
"""
TEMPLATE_VERTEX = "%f,%f,%f"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%.5g,%.5g,%.5g"
TEMPLATE_UV = "%.5g,%.5g"
TEMPLATE_COLOR = "%.3g,%.3g,%.3g"
TEMPLATE_COLOR_DEC = "%d"
TEMPLATE_MORPH_VERTICES = '\t{ "name": "%s", "vertices": [%s] }'
TEMPLATE_MORPH_COLORS = '\t{ "name": "%s", "colors": [%s] }'
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
"""Return true if file exists and is accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
def get_name(fname):
"""Create model name based of filename ("path/fname.js" -> "fname").
"""
return os.path.splitext(os.path.basename(fname))[0]
def bbox(vertices):
"""Compute bounding box of vertex array.
"""
if len(vertices)>0:
minx = maxx = vertices[0][0]
miny = maxy = vertices[0][1]
minz = maxz = vertices[0][2]
for v in vertices[1:]:
if v[0]<minx:
minx = v[0]
elif v[0]>maxx:
maxx = v[0]
if v[1]<miny:
miny = v[1]
elif v[1]>maxy:
maxy = v[1]
if v[2]<minz:
minz = v[2]
elif v[2]>maxz:
maxz = v[2]
return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] }
else:
return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
def translate(vertices, t):
"""Translate array of vertices by vector t.
"""
for i in xrange(len(vertices)):
vertices[i][0] += t[0]
vertices[i][1] += t[1]
vertices[i][2] += t[2]
def center(vertices):
"""Center model (middle of bounding box).
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def top(vertices):
"""Align top of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][1]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def bottom(vertices):
"""Align bottom of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def centerxz(vertices):
"""Center model around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = 0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def normalize(v):
"""Normalize 3d vector"""
l = math.sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
if l:
v[0] /= l
v[1] /= l
v[2] /= l
def veckey3(v):
return round(v[0], 6), round(v[1], 6), round(v[2], 6)
# #####################################################
# MTL parser
# #####################################################
def texture_relative_path(fullpath):
texture_file = os.path.basename(fullpath.replace("\\", "/"))
return texture_file
def parse_mtl(fname):
"""Parse MTL file.
"""
materials = {}
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (e.g. "newmtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Material start
# newmtl identifier
if chunks[0] == "newmtl":
if len(chunks) > 1:
identifier = chunks[1]
else:
identifier = ""
if not identifier in materials:
materials[identifier] = {}
# Diffuse texture
# map_Kd texture_diffuse.jpg
if chunks[0] == "map_Kd" and len(chunks) == 2:
materials[identifier]["mapDiffuse"] = texture_relative_path(chunks[1])
# Ambient texture
# map_Ka texture_ambient.jpg
if chunks[0] == "map_Ka" and len(chunks) == 2:
materials[identifier]["mapAmbient"] = texture_relative_path(chunks[1])
# Specular texture
# map_Ks texture_specular.jpg
if chunks[0] == "map_Ks" and len(chunks) == 2:
materials[identifier]["mapSpecular"] = texture_relative_path(chunks[1])
# Alpha texture
# map_d texture_alpha.png
if chunks[0] == "map_d" and len(chunks) == 2:
materials[identifier]["transparent"] = True
materials[identifier]["mapAlpha"] = texture_relative_path(chunks[1])
# Bump texture
# map_bump texture_bump.jpg or bump texture_bump.jpg
if (chunks[0] == "map_bump" or chunks[0] == "bump") and len(chunks) == 2:
materials[identifier]["mapBump"] = texture_relative_path(chunks[1])
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Diffuse color
# Kd 1.000 1.000 1.000
if chunks[0] == "Kd" and len(chunks) == 4:
materials[identifier]["colorDiffuse"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Ambient color
# Ka 1.000 1.000 1.000
if chunks[0] == "Ka" and len(chunks) == 4:
materials[identifier]["colorAmbient"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular color
# Ks 1.000 1.000 1.000
if chunks[0] == "Ks" and len(chunks) == 4:
materials[identifier]["colorSpecular"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular coefficient
# Ns 154.000
if chunks[0] == "Ns" and len(chunks) == 2:
materials[identifier]["specularCoef"] = float(chunks[1])
# Transparency
# Tr 0.9 or d 0.9
if (chunks[0] == "Tr" or chunks[0] == "d") and len(chunks) == 2:
materials[identifier]["transparent"] = True
if TRANSPARENCY == "invert":
materials[identifier]["transparency"] = 1.0 - float(chunks[1])
else:
materials[identifier]["transparency"] = float(chunks[1])
# Optical density
# Ni 1.0
if chunks[0] == "Ni" and len(chunks) == 2:
materials[identifier]["opticalDensity"] = float(chunks[1])
# Illumination
# illum 2
#
# 0. Color on and Ambient off
# 1. Color on and Ambient on
# 2. Highlight on
# 3. Reflection on and Ray trace on
# 4. Transparency: Glass on, Reflection: Ray trace on
# 5. Reflection: Fresnel on and Ray trace on
# 6. Transparency: Refraction on, Reflection: Fresnel off and Ray trace on
# 7. Transparency: Refraction on, Reflection: Fresnel on and Ray trace on
# 8. Reflection on and Ray trace off
# 9. Transparency: Glass on, Reflection: Ray trace off
# 10. Casts shadows onto invisible surfaces
if chunks[0] == "illum" and len(chunks) == 2:
materials[identifier]["illumination"] = int(chunks[1])
return materials
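# Illustrative sketch (values are hypothetical, not from any real file): an MTL
# fragment such as
#   newmtl RedPlastic
#   Kd 0.800 0.100 0.100
#   Ns 96.0
#   map_Kd textures/red.jpg
# would be parsed into roughly
#   {'RedPlastic': {'colorDiffuse': [0.8, 0.1, 0.1], 'specularCoef': 96.0, 'mapDiffuse': 'red.jpg'}}
# (note that map paths are reduced to their basename by texture_relative_path).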
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
"""Parse text chunk specifying single vertex.
Possible formats:
vertex index
vertex index / texture index
vertex index / texture index / normal index
vertex index / / normal index
"""
v = 0
t = 0
n = 0
chunks = text.split("/")
v = int(chunks[0])
if len(chunks) > 1:
if chunks[1]:
t = int(chunks[1])
if len(chunks) > 2:
if chunks[2]:
n = int(chunks[2])
return { 'v':v, 't':t, 'n':n }
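# For example (illustrative index values):
#   parse_vertex("5")     -> {'v': 5, 't': 0, 'n': 0}
#   parse_vertex("5/2")   -> {'v': 5, 't': 2, 'n': 0}
#   parse_vertex("5/2/7") -> {'v': 5, 't': 2, 'n': 7}
#   parse_vertex("5//7")  -> {'v': 5, 't': 0, 'n': 7}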
def parse_obj(fname):
"""Parse OBJ file.
"""
vertices = []
normals = []
uvs = []
faces = []
materials = {}
material = ""
mcounter = 0
mcurrent = 0
mtllib = ""
# current face state
group = 0
object = 0
smooth = 0
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (e.g. "usemtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Group
if chunks[0] == "g" and len(chunks) == 2:
group = chunks[1]
# Object
if chunks[0] == "o" and len(chunks) == 2:
object = chunks[1]
# Materials definition
if chunks[0] == "mtllib" and len(chunks) == 2:
mtllib = chunks[1]
# Material
if chunks[0] == "usemtl":
if len(chunks) > 1:
material = chunks[1]
else:
material = ""
if not material in materials:
mcurrent = mcounter
materials[material] = mcounter
mcounter += 1
else:
mcurrent = materials[material]
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Vertices as (x,y,z) coordinates
# v 0.123 0.234 0.345
if chunks[0] == "v" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
vertices.append([x,y,z])
# Normals in (x,y,z) form; normals might not be unit
# vn 0.707 0.000 0.707
if chunks[0] == "vn" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
normals.append([x,y,z])
# Texture coordinates in (u,v[,w]) coordinates, w is optional
# vt 0.500 -1.352 [0.234]
if chunks[0] == "vt" and len(chunks) >= 3:
u = float(chunks[1])
v = float(chunks[2])
w = 0
if len(chunks)>3:
w = float(chunks[3])
uvs.append([u,v,w])
# Face
if chunks[0] == "f" and len(chunks) >= 4:
vertex_index = []
uv_index = []
normal_index = []
# Precompute vert / normal / uv lists
# for negative index lookup
vertlen = len(vertices) + 1
normlen = len(normals) + 1
uvlen = len(uvs) + 1
for v in chunks[1:]:
vertex = parse_vertex(v)
if vertex['v']:
if vertex['v'] < 0:
vertex['v'] += vertlen
vertex_index.append(vertex['v'])
if vertex['t']:
if vertex['t'] < 0:
vertex['t'] += uvlen
uv_index.append(vertex['t'])
if vertex['n']:
if vertex['n'] < 0:
vertex['n'] += normlen
normal_index.append(vertex['n'])
faces.append({
'vertex':vertex_index,
'uv':uv_index,
'normal':normal_index,
'material':mcurrent,
'group':group,
'object':object,
'smooth':smooth,
})
# Smooth shading
if chunks[0] == "s" and len(chunks) == 2:
smooth = chunks[1]
return faces, vertices, uvs, normals, materials, mtllib
# #####################################################
# Generator - faces
# #####################################################
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
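# For example: setBit(0, 3, True) == 8, and setBit(0b1111, 1, False) == 0b1101 == 13.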
def generate_face(f, fc):
isTriangle = ( len(f['vertex']) == 3 )
if isTriangle:
nVertices = 3
else:
nVertices = 4
hasMaterial = True # for the moment OBJs without materials get default material
hasFaceUvs = False # not supported in OBJ
hasFaceVertexUvs = ( len(f['uv']) >= nVertices )
hasFaceNormals = False # don't export any face normals (as they are computed in engine)
hasFaceVertexNormals = ( len(f["normal"]) >= nVertices and SHADING == "smooth" )
hasFaceColors = BAKE_COLORS
hasFaceVertexColors = False # not supported in OBJ
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face normal index
# face vertex normals indices
# face color index
# face vertex colors indices
faceData.append(faceType)
# must clamp in case of polygons bigger than quads
for i in xrange(nVertices):
index = f['vertex'][i] - 1
faceData.append(index)
faceData.append( f['material'] )
if hasFaceVertexUvs:
for i in xrange(nVertices):
index = f['uv'][i] - 1
faceData.append(index)
if hasFaceVertexNormals:
for i in xrange(nVertices):
index = f['normal'][i] - 1
faceData.append(index)
if hasFaceColors:
index = fc['material']
faceData.append(index)
return ",".join( map(str, faceData) )
# #####################################################
# Generator - chunks
# #####################################################
def hexcolor(c):
return ( int(c[0] * 255) << 16 ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255)
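# e.g. hexcolor([1.0, 0.5, 0.0]) == 0xff7f00 == 16744192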
def generate_vertex(v, option_vertices_truncate, scale):
if not option_vertices_truncate:
return TEMPLATE_VERTEX % (v[0], v[1], v[2])
else:
return TEMPLATE_VERTEX_TRUNCATE % (scale * v[0], scale * v[1], scale * v[2])
def generate_normal(n):
return TEMPLATE_N % (n[0], n[1], n[2])
def generate_uv(uv):
return TEMPLATE_UV % (uv[0], uv[1])
def generate_color_rgb(c):
return TEMPLATE_COLOR % (c[0], c[1], c[2])
def generate_color_decimal(c):
return TEMPLATE_COLOR_DEC % hexcolor(c)
# #####################################################
# Morphs
# #####################################################
def generate_morph_vertex(name, vertices):
vertex_string = ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices)
return TEMPLATE_MORPH_VERTICES % (name, vertex_string)
def generate_morph_color(name, colors):
color_string = ",".join(generate_color_rgb(c) for c in colors)
return TEMPLATE_MORPH_COLORS % (name, color_string)
def extract_material_colors(materials, mtlfilename, basename):
"""Extract diffuse colors from MTL materials
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
mtlColorArraySrt = []
for m in mtl:
if m in materials:
index = materials[m]
color = mtl[m].get("colorDiffuse", [1,0,0])
mtlColorArraySrt.append([index, color])
mtlColorArraySrt.sort()
mtlColorArray = [x[1] for x in mtlColorArraySrt]
return mtlColorArray
def extract_face_colors(faces, material_colors):
"""Extract colors from materials and assign them to faces
"""
faceColors = []
for face in faces:
material_index = face['material']
faceColors.append(material_colors[material_index])
return faceColors
def generate_morph_targets(morphfiles, n_vertices, infile):
skipOriginalMorph = False
norminfile = os.path.normpath(infile)
morphVertexData = []
for mfilepattern in morphfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
indices = range(0, len(matches), FRAMESTEP)
for i in indices:
path = matches[i]
normpath = os.path.normpath(path)
if normpath != norminfile or not skipOriginalMorph:
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
else:
if ALIGN == "center":
center(morphVertices)
elif ALIGN == "centerxz":
centerxz(morphVertices)
elif ALIGN == "bottom":
bottom(morphVertices)
elif ALIGN == "top":
top(morphVertices)
morphVertexData.append((get_name(name), morphVertices))
print "adding [%s] with %d vertices" % (name, n_morph_vertices)
morphTargets = ""
if len(morphVertexData):
morphTargets = "\n%s\n\t" % ",\n".join(generate_morph_vertex(name, vertices) for name, vertices in morphVertexData)
return morphTargets
def generate_morph_colors(colorfiles, n_vertices, n_faces):
morphColorData = []
colorFaces = []
materialColors = []
for mfilepattern in colorfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
for path in matches:
normpath = os.path.normpath(path)
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
n_morph_faces = len(morphFaces)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph color map [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
elif n_faces != n_morph_faces:
print "WARNING: skipping morph color map [%s] with different number of faces [%d] than the original model [%d]" % (name, n_morph_faces, n_faces)
else:
morphMaterialColors = extract_material_colors(morphMaterials, morphMtllib, normpath)
morphFaceColors = extract_face_colors(morphFaces, morphMaterialColors)
morphColorData.append((get_name(name), morphFaceColors))
# take first color map for baking into face colors
if len(colorFaces) == 0:
colorFaces = morphFaces
materialColors = morphMaterialColors
print "adding [%s] with %d face colors" % (name, len(morphFaceColors))
morphColors = ""
if len(morphColorData):
morphColors = "\n%s\n\t" % ",\n".join(generate_morph_color(name, colors) for name, colors in morphColorData)
return morphColors, colorFaces, materialColors
# #####################################################
# Materials
# #####################################################
def generate_color(i):
"""Generate hex color corresponding to integer.
Colors should have well defined ordering.
First N colors are hardcoded, then colors are random
(must seed random number generator with deterministic value
before getting colors).
"""
if i < len(COLORS):
#return "0x%06x" % COLORS[i]
return COLORS[i]
else:
#return "0x%06x" % int(0xffffff * random.random())
return int(0xffffff * random.random())
def value2string(v):
if type(v)==str and v[0:2] != "0x":
return '"%s"' % v
elif type(v) == bool:
return str(v).lower()
return str(v)
def generate_materials(mtl, materials):
"""Generate JS array of materials objects
JS material objects are basically prettified one-to-one
mappings of MTL properties in JSON format.
"""
mtl_array = []
for m in mtl:
if m in materials:
index = materials[m]
# add debug information
# materials should be sorted according to the order
# in which they first appeared in the OBJ file
# this index is the identifier used in face definitions
mtl[m]['DbgName'] = m
mtl[m]['DbgIndex'] = index
mtl[m]['DbgColor'] = generate_color(index)
if BAKE_COLORS:
mtl[m]['vertexColors'] = "face"
mtl_raw = ",\n".join(['\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())])
mtl_string = "\t{\n%s\n\t}" % mtl_raw
mtl_array.append([index, mtl_string])
return ",\n\n".join([m for i,m in sorted(mtl_array)])
def generate_mtl(materials):
"""Generate dummy materials (if there is no MTL file).
"""
mtl = {}
for m in materials:
index = materials[m]
mtl[m] = {
'DbgName': m,
'DbgIndex': index,
'DbgColor': generate_color(index)
}
return mtl
def generate_materials_string(materials, mtlfilename, basename):
"""Generate final materials string.
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
return generate_materials(mtl, materials)
def create_materials(materials, mtlfilename, basename):
"""Parse MTL file and create mapping between its materials and OBJ materials.
Possible edge cases are handled here (missing materials, missing MTL file).
"""
random.seed(42) # to get well defined color order for debug colors
# default materials with debug colors for when
# there is no specified MTL / MTL loading failed,
# or if there were no materials / null materials
mtl = generate_mtl(materials)
if mtlfilename:
# create full pathname for MTL (included from OBJ)
path = os.path.dirname(basename)
fname = os.path.join(path, mtlfilename)
if file_exists(fname):
# override default materials with real ones from MTL
# (where they exist, otherwise keep defaults)
mtl.update(parse_mtl(fname))
else:
print "Couldn't find [%s]" % fname
return mtl
# #####################################################
# Faces
# #####################################################
def is_triangle_flat(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_triangle_flat_uv(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==3
def is_triangle_smooth(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_triangle_smooth_uv(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and len(f['uv'])==3
def is_quad_flat(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_quad_flat_uv(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==4
def is_quad_smooth(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_quad_smooth_uv(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and len(f['uv'])==4
def sort_faces(faces):
data = {
'triangles_flat': [],
'triangles_flat_uv': [],
'triangles_smooth': [],
'triangles_smooth_uv': [],
'quads_flat': [],
'quads_flat_uv': [],
'quads_smooth': [],
'quads_smooth_uv': []
}
for f in faces:
if is_triangle_flat(f):
data['triangles_flat'].append(f)
elif is_triangle_flat_uv(f):
data['triangles_flat_uv'].append(f)
elif is_triangle_smooth(f):
data['triangles_smooth'].append(f)
elif is_triangle_smooth_uv(f):
data['triangles_smooth_uv'].append(f)
elif is_quad_flat(f):
data['quads_flat'].append(f)
elif is_quad_flat_uv(f):
data['quads_flat_uv'].append(f)
elif is_quad_smooth(f):
data['quads_smooth'].append(f)
elif is_quad_smooth_uv(f):
data['quads_smooth_uv'].append(f)
return data
# #####################################################
# API - ASCII converter
# #####################################################
def convert_ascii(infile, morphfiles, colorfiles, outfile):
"""Convert infile.obj to outfile.js
Here is where everything happens. If you need to automate conversions,
just import this file as a Python module and call this method.
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
# parse OBJ / MTL files
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
n_vertices = len(vertices)
n_faces = len(faces)
# align model
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
# generate normals string
nnormal = 0
normals_string = ""
if SHADING == "smooth":
normals_string = ",".join(generate_normal(n) for n in normals)
nnormal = len(normals)
# extract morph vertices
morphTargets = generate_morph_targets(morphfiles, n_vertices, infile)
# extract morph colors
morphColors, colorFaces, materialColors = generate_morph_colors(colorfiles, n_vertices, n_faces)
# generate colors string
ncolor = 0
colors_string = ""
if len(colorFaces) < len(faces):
colorFaces = faces
materialColors = extract_material_colors(materials, mtllib, infile)
if BAKE_COLORS:
colors_string = ",".join(generate_color_decimal(c) for c in materialColors)
ncolor = len(materialColors)
# generate ascii model string
text = TEMPLATE_FILE_ASCII % {
"name" : get_name(outfile),
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nuv" : len(uvs),
"nnormal" : nnormal,
"ncolor" : ncolor,
"nmaterial" : len(materials),
"materials" : generate_materials_string(materials, mtllib, infile),
"normals" : normals_string,
"colors" : colors_string,
"uvs" : ",".join(generate_uv(uv) for uv in uvs),
"vertices" : ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices),
"morphTargets" : morphTargets,
"morphColors" : morphColors,
"faces" : ",".join(generate_face(f, fc) for f, fc in zip(faces, colorFaces)),
"scale" : SCALE
}
out = open(outfile, "w")
out.write(text)
out.close()
print "%d vertices, %d faces, %d materials" % (len(vertices), len(faces), len(materials))
# #############################################################################
# API - Binary converter
# #############################################################################
def dump_materials_to_buffer(faces, buffer):
for f in faces:
data = struct.pack('<H',
f['material'])
buffer.append(data)
def dump_vertices3_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<III',
vi[0]-1, vi[1]-1, vi[2]-1)
buffer.append(data)
def dump_vertices4_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<IIII',
vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1)
buffer.append(data)
def dump_normals3_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<III',
ni[0]-1, ni[1]-1, ni[2]-1)
buffer.append(data)
def dump_normals4_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<IIII',
ni[0]-1, ni[1]-1, ni[2]-1, ni[3]-1)
buffer.append(data)
def dump_uvs3_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<III',
ui[0]-1, ui[1]-1, ui[2]-1)
buffer.append(data)
def dump_uvs4_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<IIII',
ui[0]-1, ui[1]-1, ui[2]-1, ui[3]-1)
buffer.append(data)
def add_padding(buffer, n):
if n % 4:
for i in range(4 - n % 4):
data = struct.pack('<B', 0)
buffer.append(data)
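# e.g. add_padding(buffer, 6) appends two zero bytes so the next chunk starts on a
# 4-byte boundary, while add_padding(buffer, 8) appends nothing.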
def convert_binary(infile, outfile):
"""Convert infile.obj to outfile.js + outfile.bin
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
binfile = get_name(outfile) + ".bin"
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
sfaces = sort_faces(faces)
if SHADING == "smooth":
nnormals = len(normals)
else:
nnormals = 0
# ###################
# generate JS file
# ###################
text = TEMPLATE_FILE_BIN % {
"name" : get_name(outfile),
"materials" : generate_materials_string(materials, mtllib, infile),
"buffers" : binfile,
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nmaterial" : len(materials),
"nnormal" : nnormals,
"nuv" : len(uvs)
}
out = open(outfile, "w")
out.write(text)
out.close()
# ###################
# generate BIN file
# ###################
buffer = []
# header
# ------
header_bytes = struct.calcsize('<12s')
header_bytes += struct.calcsize('<BBBBBBBB')
header_bytes += struct.calcsize('<IIIIIIIIIII')
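# i.e. header_bytes = 12 (signature) + 8 (eight unsigned chars) + 44 (eleven unsigned ints) = 64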
# signature
signature = struct.pack('<12s', 'Three.js 003')
# metadata (all data is little-endian)
vertex_coordinate_bytes = 4
normal_coordinate_bytes = 1
uv_coordinate_bytes = 4
vertex_index_bytes = 4
normal_index_bytes = 4
uv_index_bytes = 4
material_index_bytes = 2
# header_bytes unsigned char 1
# vertex_coordinate_bytes unsigned char 1
# normal_coordinate_bytes unsigned char 1
# uv_coordinate_bytes unsigned char 1
# vertex_index_bytes unsigned char 1
# normal_index_bytes unsigned char 1
# uv_index_bytes unsigned char 1
# material_index_bytes unsigned char 1
bdata = struct.pack('<BBBBBBBB', header_bytes,
vertex_coordinate_bytes,
normal_coordinate_bytes,
uv_coordinate_bytes,
vertex_index_bytes,
normal_index_bytes,
uv_index_bytes,
material_index_bytes)
ntri_flat = len(sfaces['triangles_flat'])
ntri_smooth = len(sfaces['triangles_smooth'])
ntri_flat_uv = len(sfaces['triangles_flat_uv'])
ntri_smooth_uv = len(sfaces['triangles_smooth_uv'])
nquad_flat = len(sfaces['quads_flat'])
nquad_smooth = len(sfaces['quads_smooth'])
nquad_flat_uv = len(sfaces['quads_flat_uv'])
nquad_smooth_uv = len(sfaces['quads_smooth_uv'])
# nvertices unsigned int 4
# nnormals unsigned int 4
# nuvs unsigned int 4
# ntri_flat unsigned int 4
# ntri_smooth unsigned int 4
# ntri_flat_uv unsigned int 4
# ntri_smooth_uv unsigned int 4
# nquad_flat unsigned int 4
# nquad_smooth unsigned int 4
# nquad_flat_uv unsigned int 4
# nquad_smooth_uv unsigned int 4
ndata = struct.pack('<IIIIIIIIIII', len(vertices),
nnormals,
len(uvs),
ntri_flat,
ntri_smooth,
ntri_flat_uv,
ntri_smooth_uv,
nquad_flat,
nquad_smooth,
nquad_flat_uv,
nquad_smooth_uv)
buffer.append(signature)
buffer.append(bdata)
buffer.append(ndata)
# 1. vertices
# ------------
# x float 4
# y float 4
# z float 4
for v in vertices:
data = struct.pack('<fff', v[0], v[1], v[2])
buffer.append(data)
# 2. normals
# ---------------
# x signed char 1
# y signed char 1
# z signed char 1
if SHADING == "smooth":
for n in normals:
normalize(n)
data = struct.pack('<bbb', math.floor(n[0]*127+0.5),
math.floor(n[1]*127+0.5),
math.floor(n[2]*127+0.5))
buffer.append(data)
add_padding(buffer, nnormals * 3)
# 3. uvs
# -----------
# u float 4
# v float 4
for uv in uvs:
data = struct.pack('<ff', uv[0], uv[1])
buffer.append(data)
# padding
#data = struct.pack('<BB', 0, 0)
#buffer.append(data)
# 4. flat triangles (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# ------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat'], buffer)
add_padding(buffer, ntri_flat * 2)
# 5. smooth triangles (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# -------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# -------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth'], buffer)
add_padding(buffer, ntri_smooth * 2)
# 6. flat triangles uv (vertices + materials + uvs)
# --------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat_uv'], buffer)
add_padding(buffer, ntri_flat_uv * 2)
# 7. smooth triangles uv (vertices + materials + normals + uvs)
# ----------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth_uv'], buffer)
add_padding(buffer, ntri_smooth_uv * 2)
# 8. flat quads (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat'], buffer)
dump_materials_to_buffer(sfaces['quads_flat'], buffer)
add_padding(buffer, nquad_flat * 2)
# 9. smooth quads (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth'], buffer)
add_padding(buffer, nquad_smooth * 2)
# 10. flat quads uv (vertices + materials + uvs)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_flat_uv'], buffer)
add_padding(buffer, nquad_flat_uv * 2)
# 11. smooth quads uv
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth_uv'], buffer)
add_padding(buffer, nquad_smooth_uv * 2)
path = os.path.dirname(outfile)
fname = os.path.join(path, binfile)
out = open(fname, "wb")
out.write("".join(buffer))
out.close()
# #############################################################################
# Helpers
# #############################################################################
def usage():
print "Usage: %s -i filename.obj -o filename.js [-m morphfiles*.obj] [-c morphcolors*.obj] [-a center|top|bottom] [-s flat|smooth] [-t binary|ascii] [-d invert|normal]" % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hbi:m:c:b:o:a:s:t:d:x:f:", ["help", "bakecolors", "input=", "morphs=", "colors=", "output=", "align=", "shading=", "type=", "dissolve=", "truncatescale=", "framestep="])
except getopt.GetoptError:
usage()
sys.exit(2)
infile = outfile = ""
morphfiles = ""
colorfiles = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
infile = a
elif o in ("-m", "--morphs"):
morphfiles = a
elif o in ("-c", "--colors"):
colorfiles = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-a", "--align"):
if a in ("top", "bottom", "center", "centerxz", "none"):
ALIGN = a
elif o in ("-s", "--shading"):
if a in ("flat", "smooth"):
SHADING = a
elif o in ("-t", "--type"):
if a in ("binary", "ascii"):
TYPE = a
elif o in ("-d", "--dissolve"):
if a in ("normal", "invert"):
TRANSPARENCY = a
elif o in ("-b", "--bakecolors"):
BAKE_COLORS = True
elif o in ("-x", "--truncatescale"):
TRUNCATE = True
SCALE = float(a)
elif o in ("-f", "--framestep"):
FRAMESTEP = int(a)
if infile == "" or outfile == "":
usage()
sys.exit(2)
print "Converting [%s] into [%s] ..." % (infile, outfile)
if morphfiles:
print "Morphs [%s]" % morphfiles
if colorfiles:
print "Colors [%s]" % colorfiles
if TYPE == "ascii":
convert_ascii(infile, morphfiles, colorfiles, outfile)
elif TYPE == "binary":
convert_binary(infile, outfile)
|
MingdaZhou/gnuradio
|
refs/heads/master
|
gr-dtv/python/dtv/qa_dtv.py
|
46
|
#!/usr/bin/env python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, dtv
class test_dtv(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_000(self):
pass
if __name__ == '__main__':
gr_unittest.run(test_dtv, "test_dtv.xml")
|
tschijnmo/drudge
|
refs/heads/master
|
tests/eldag_canon_test.py
|
1
|
"""Tests for the canonicalization facility for Eldags."""
from drudge import Perm, Group
from drudge.canonpy import canon_eldag
def test_eldag_can_be_canonicalized():
"""Tests the Eldag canonicalization facility.
Note that this test focuses more on coverage of the canonpy interface
to libcanon than on the correctness of the canonicalization algorithm,
which should already be tested within libcanon.
In this test, we have two bivalent nodes in the Eldag, one without symmetry,
one with symmetry. They are both connected to two terminal nodes with the
same colour.
In this graph, the connection to the non-symmetric node determines the
resulted permutations.
"""
transp = Perm([1, 0], 1)
symms = [None, Group([transp]), None, None]
colours = [0, 1, 1, 1] # We force the non-symmetric node to come earlier.
for if_same in [True, False]:
# Whether the non-symmetric node is connected to the two terminal nodes in
# order. The symmetric node always connects to them in order.
edges = [2, 3, 2, 3] if if_same else [3, 2, 2, 3]
ia = [0, 2, 4, 4, 4]
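# Reading aid: ia is the CSR-style offset array, so node i owns edges[ia[i]:ia[i+1]]
# -- node 0 -> edges[0:2], node 1 -> edges[2:4], and terminals 2 and 3 have no children.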
node_order, perms = canon_eldag(edges, ia, symms, colours)
# Assertions applicable to both cases.
assert node_order[0] == 0
assert node_order[1] == 1
for i in [0, 2, 3]:
assert perms[i] is None
continue
# The ordering of the two terminals.
if if_same:
assert node_order[2:] == [2, 3]
else:
assert node_order[2:] == [3, 2]
# The permutation of the symmetric node.
perm = perms[1]
if if_same:
assert perm[0] == 0
assert perm[1] == 1
assert perm.acc == 0
else:
assert perm[0] == 1
assert perm[1] == 0
assert perm.acc == 1
continue
return
|
gugahoi/maraschino
|
refs/heads/master
|
lib/transmissionrpc/__init__.py
|
10
|
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2011 Erik Svensson <erik.public@gmail.com>
# Licensed under the MIT license.
from transmissionrpc.constants import DEFAULT_PORT, DEFAULT_TIMEOUT, PRIORITY, RATIO_LIMIT, LOGGER
from transmissionrpc.error import TransmissionError, HTTPHandlerError
from transmissionrpc.httphandler import HTTPHandler, DefaultHTTPHandler
from transmissionrpc.torrent import Torrent
from transmissionrpc.session import Session
from transmissionrpc.client import Client
from transmissionrpc.utils import add_stdout_logger
__author__ = u'Erik Svensson <erik.public@gmail.com>'
__version__ = u'0.8'
__copyright__ = u'Copyright (c) 2008-2011 Erik Svensson'
__license__ = u'MIT'
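# Typical usage sketch (illustrative only; the host and port below are assumptions):
#   client = Client('localhost', port=DEFAULT_PORT)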
|
foursquare/commons-old
|
refs/heads/master
|
src/python/twitter/common/decorators/lru_cache.py
|
16
|
# Taken from http://code.activestate.com/recipes/578078
# A 2.6.x backport of Python 3.3 functools.lru_cache
#
# Modified from 4=>2 length indents for code consistency.
#
# Added an on_eviction parameter that gets applied to each element
# when it is evicted from the cache, e.g. file.close(...).
from collections import namedtuple
from functools import update_wrapper
from threading import Lock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
def lru_cache(maxsize=100, typed=False, on_eviction=lambda x:x):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
kwd_mark = (object(),) # separate positional and keyword args
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = Lock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
nonlocal_root = [root] # make updateable non-locally
root[:] = [root, root, None, None] # initialize by pointing to self
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
def make_key(args, kwds, typed, tuple=tuple, sorted=sorted, type=type):
# helper function to build a cache key from positional and keyword args
key = args
if kwds:
sorted_items = tuple(sorted(kwds.items()))
key += kwd_mark + sorted_items
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
return key
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed) if kwds or typed else args
result = cache_get(key, root) # root used here as a unique not-found sentinel
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the front of the list
root, = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
root = nonlocal_root[0]
if _len(cache) < maxsize:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
cache[key] = last[NEXT] = root[PREV] = link
else:
# use root to store the new key and result
root[KEY] = key
root[RESULT] = result
cache[key] = root
# empty the oldest link and make it the new root
root = nonlocal_root[0] = root[NEXT]
evicted = cache.pop(root[KEY])
on_eviction(evicted[RESULT])
root[KEY] = None
root[RESULT] = None
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
for value in cache.values():
on_eviction(value[RESULT])
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
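# Usage sketch (illustrative, not part of the original recipe; paths are hypothetical):
#
#   @lru_cache(maxsize=2, on_eviction=lambda f: f.close())
#   def open_log(path):
#     return open(path)
#
#   open_log('/tmp/a.log'); open_log('/tmp/b.log'); open_log('/tmp/c.log')
#   # the handle for '/tmp/a.log' has now been evicted and closed by on_eviction
#   print open_log.cache_info()  # CacheInfo(hits=0, misses=3, maxsize=2, currsize=2)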
|
homme/ansible
|
refs/heads/devel
|
lib/ansible/new_inventory/__init__.py
|
170
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
from ansible import constants as C
from ansible import errors
from ansible.inventory.group import Group
from .host import Host
from ansible.plugins.inventory.aggregate import InventoryAggregateParser
class Inventory:
'''
Create hosts and groups from inventory
Retrieve the hosts and groups that ansible knows about from this class.
Retrieve raw variables (non-expanded) from the Group and Host classes
returned from here.
'''
def __init__(self, inventory_list=C.DEFAULT_HOST_LIST):
'''
:kwarg inventory_list: A list of inventory sources. This may be file
names which will be parsed as ini-like files, executable scripts
which return inventory data as json, directories of both of the above,
or hostnames. Files and directories are
:kwarg vault_password: Password to use if any of the inventory sources
are in an ansible vault
'''
self._restricted_to = None
self._filter_pattern = None
parser = InventoryAggregateParser(inventory_list)
parser.parse()
self._basedir = parser.basedir
self._hosts = parser.hosts
self._groups = parser.groups
def get_hosts(self):
'''
Return the list of hosts, after filtering based on any set pattern
and restricting the results based on the set host restrictions.
'''
if self._filter_pattern:
hosts = self._filter_hosts()
else:
hosts = self._hosts[:]
if self._restricted_to is not None:
# this will preserve the order of hosts after intersecting them
res_set = set(hosts).intersection(self._restricted_to)
return [h for h in hosts if h in res_set]
else:
return hosts[:]
def get_groups(self):
'''
Retrieve the Group objects known to the Inventory
'''
return self._groups[:]
def get_host(self, hostname):
'''
Retrieve the Host object for a hostname
'''
for host in self._hosts:
if host.name == hostname:
return host
return None
def get_group(self, groupname):
'''
Retrieve the Group object for a groupname
'''
for group in self._groups:
if group.name == groupname:
return group
return None
def add_group(self, group):
'''
Add a new group to the inventory
'''
if group not in self._groups:
self._groups.append(group)
def set_filter_pattern(self, pattern='all'):
'''
Sets a pattern upon which hosts/groups will be filtered.
This pattern can contain logical groupings such as unions,
intersections and negations using special syntax.
'''
self._filter_pattern = pattern
def set_host_restriction(self, restriction):
'''
Restrict operations to hosts in the given list
'''
assert isinstance(restriction, list)
self._restricted_to = restriction[:]
def remove_host_restriction(self):
'''
Remove the restriction on hosts, if any.
'''
self._restricted_to = None
def _filter_hosts(self):
"""
Limits inventory results to a subset of inventory that matches a given
list of patterns, for example to select the subset of hosts that also
belongs to a certain geographic group or numeric slice.
Corresponds to --limit parameter to ansible-playbook
:arg patterns: The pattern to limit with. If this is None it
clears the subset. Multiple patterns may be specified as a comma,
semicolon, or colon separated string.
"""
hosts = []
pattern_regular = []
pattern_intersection = []
pattern_exclude = []
patterns = self._filter_pattern.replace(";",":").split(":")
for p in patterns:
if p.startswith("!"):
pattern_exclude.append(p)
elif p.startswith("&"):
pattern_intersection.append(p)
elif p:
pattern_regular.append(p)
# if no regular pattern was given, hence only exclude and/or intersection
# make that magically work
if pattern_regular == []:
pattern_regular = ['all']
# when applying the host selectors, run those without the "&" or "!"
# first, then the &s, then the !s.
patterns = pattern_regular + pattern_intersection + pattern_exclude
for p in patterns:
intersect = False
negate = False
if p.startswith('&'):
intersect = True
elif p.startswith('!'):
p = p[1:]
negate = True
target = self._resolve_pattern(p)
if isinstance(target, Host):
if negate and target in hosts:
# remove it
hosts.remove(target)
elif target not in hosts:
# for both union and intersections, we just append it
hosts.append(target)
else:
if intersect:
hosts = [ h for h in hosts if h not in target ]
elif negate:
hosts = [ h for h in hosts if h in target ]
else:
to_append = [ h for h in target if h.name not in [ y.name for y in hosts ] ]
hosts.extend(to_append)
return hosts
def _resolve_pattern(self, pattern):
target = self.get_host(pattern)
if target:
return target
else:
(name, enumeration_details) = self._enumeration_info(pattern)
hpat = self._hosts_in_unenumerated_pattern(name)
result = self._apply_ranges(pattern, hpat)
return result
def _enumeration_info(self, pattern):
"""
returns (pattern, limits) taking a regular pattern and finding out
which parts of it correspond to start/stop offsets. limits is
a tuple of (start, stop) or None
"""
# Do not parse regexes for enumeration info
if pattern.startswith('~'):
return (pattern, None)
# The regex used to match on the range, which can be [x] or [x-y].
pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$")
m = pattern_re.match(pattern)
if m:
(target, first, last, rest) = m.groups()
first = int(first)
if last:
if first < 0:
raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range")
last = int(last)
else:
last = first
return (target, (first, last))
else:
return (pattern, None)
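# For example (illustrative patterns):
#   self._enumeration_info("web[1-3]") -> ("web", (1, 3))
#   self._enumeration_info("web[0]")   -> ("web", (0, 0))
#   self._enumeration_info("all")      -> ("all", None)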
def _apply_ranges(self, pat, hosts):
"""
given a pattern like foo, that matches hosts, return all of hosts
given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts
"""
# If there are no hosts to select from, just return the
# empty set. This prevents trying to do selections on an empty set.
# issue#6258
if not hosts:
return hosts
(loose_pattern, limits) = self._enumeration_info(pat)
if not limits:
return hosts
(left, right) = limits
if left == '':
left = 0
if right == '':
right = 0
left=int(left)
right=int(right)
try:
if left != right:
return hosts[left:right]
else:
return [ hosts[left] ]
except IndexError:
raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat)
def _hosts_in_unenumerated_pattern(self, pattern):
""" Get all host names matching the pattern """
results = []
hosts = []
hostnames = set()
# ignore any negative checks here, this is handled elsewhere
pattern = pattern.replace("!","").replace("&", "")
def __append_host_to_results(host):
if host not in results and host.name not in hostnames:
hostnames.add(host.name)
results.append(host)
groups = self.get_groups()
for group in groups:
if pattern == 'all':
for host in group.get_hosts():
__append_host_to_results(host)
else:
if self._match(group.name, pattern):
for host in group.get_hosts():
__append_host_to_results(host)
else:
matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
for host in matching_hosts:
__append_host_to_results(host)
if pattern in ["localhost", "127.0.0.1"] and len(results) == 0:
new_host = self._create_implicit_localhost(pattern)
results.append(new_host)
return results
def _create_implicit_localhost(self, pattern):
new_host = Host(pattern)
new_host._connection = 'local'
new_host.set_variable("ansible_python_interpreter", sys.executable)
ungrouped = self.get_group("ungrouped")
if ungrouped is None:
self.add_group(Group('ungrouped'))
ungrouped = self.get_group('ungrouped')
self.get_group('all').add_child_group(ungrouped)
ungrouped.add_host(new_host)
return new_host
def is_file(self):
'''
Did inventory come from a file?
:returns: True if the inventory is file based, False otherwise
'''
pass
def src(self):
'''
What's the complete path to the inventory file?
:returns: Complete path to the inventory file. None if inventory is
not file-based
'''
pass
def basedir(self):
'''
What directory from which the inventory was read.
'''
return self._basedir
|
kdwink/intellij-community
|
refs/heads/master
|
python/testData/intentions/returnTypeInPy3AnnotationNoColon.py
|
43
|
def fo<caret>o()
@decorator
def bar():
pass
|
wangyum/mxnet
|
refs/heads/master
|
example/gluon/kaggle_k_fold_cross_validation.py
|
25
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This example provides an end-to-end pipeline for a common Kaggle competition.
# The entire pipeline includes common utilities such as k-fold cross validation
# and data pre-processing.
#
# Specifically, the example studies the `House Prices: Advanced Regression
# Techniques` challenge as a case study.
#
# The link to the problem on Kaggle:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques
import numpy as np
import pandas as pd
from mxnet import autograd
from mxnet import gluon
from mxnet import ndarray as nd
# After logging in www.kaggle.com, the training and testing data sets can be downloaded at:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
all_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
test.loc[:, 'MSSubClass':'SaleCondition']))
# Get all the numerical features and apply standardization.
numeric_feas = all_X.dtypes[all_X.dtypes != "object"].index
all_X[numeric_feas] = all_X[numeric_feas].apply(lambda x:
(x - x.mean()) / (x.std()))
# Convert categorical feature values to numerical (including N/A).
all_X = pd.get_dummies(all_X, dummy_na=True)
# Approximate N/A feature value by the mean value of the current feature.
all_X = all_X.fillna(all_X.mean())
num_train = train.shape[0]
# Convert data formats to NDArrays to feed into gluon.
X_train = all_X[:num_train].as_matrix()
X_test = all_X[num_train:].as_matrix()
y_train = train.SalePrice.as_matrix()
X_train = nd.array(X_train)
y_train = nd.array(y_train)
y_train.reshape((num_train, 1))
X_test = nd.array(X_test)
square_loss = gluon.loss.L2Loss()
def get_rmse_log(net, X_train, y_train):
"""Gets root mse between the logarithms of the prediction and the truth."""
num_train = X_train.shape[0]
clipped_preds = nd.clip(net(X_train), 1, float('inf'))
return np.sqrt(2 * nd.sum(square_loss(
nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
def get_net():
"""Gets a neural network. Better results are obtained with modifications."""
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(50, activation="relu"))
net.add(gluon.nn.Dense(1))
net.initialize()
return net
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size):
"""Trains the model."""
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate,
'wd': weight_decay})
net.collect_params().initialize(force_reinit=True)
for epoch in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
avg_loss = get_rmse_log(net, X_train, y_train)
if epoch > verbose_epoch:
print("Epoch %d, train loss: %f" % (epoch, avg_loss))
return avg_loss
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size):
"""Conducts k-fold cross validation for the model."""
assert k > 1
fold_size = X_train.shape[0] // k
train_loss_sum = 0.0
test_loss_sum = 0.0
for test_idx in range(k):
X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
fold_size, :]
y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
val_train_defined = False
for i in range(k):
if i != test_idx:
X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
if not val_train_defined:
X_val_train = X_cur_fold
y_val_train = y_cur_fold
val_train_defined = True
else:
X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
net = get_net()
train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
learning_rate, weight_decay, batch_size)
train_loss_sum += train_loss
test_loss = get_rmse_log(net, X_val_test, y_val_test)
print("Test loss: %f" % test_loss)
test_loss_sum += test_loss
return train_loss_sum / k, test_loss_sum / k
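# Reader note on how the folds are formed: with fold_size = len(X_train) // k, fold i
# uses rows [i * fold_size, (i + 1) * fold_size) for validation and the remaining
# k - 1 folds, concatenated along dim 0, for training; any trailing rows beyond
# k * fold_size are not used in any fold.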
# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation.
k = 5
epochs = 100
verbose_epoch = 95
learning_rate = 0.3
weight_decay = 100
batch_size = 100
train_loss, test_loss = \
k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size)
print("%d-fold validation: Avg train loss: %f, Avg test loss: %f" %
(k, train_loss, test_loss))
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size):
"""Trains the model and predicts on the test data set."""
net = get_net()
_ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size)
preds = net(X_test).asnumpy()
test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False)
learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size)
|
BuildingLink/sentry
|
refs/heads/master
|
src/sentry/south_migrations/0277_auto__add_commitfilechange__add_unique_commitfilechange_commit_filenam.py
|
4
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CommitFileChange'
db.create_table('sentry_commitfilechange', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('organization_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(db_index=True)),
('commit', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Commit'])),
('filename', self.gf('django.db.models.fields.CharField')(max_length=255)),
('type', self.gf('django.db.models.fields.CharField')(max_length=1)),
))
db.send_create_signal('sentry', ['CommitFileChange'])
# Adding unique constraint on 'CommitFileChange', fields ['commit', 'filename']
db.create_unique('sentry_commitfilechange', ['commit_id', 'filename'])
# Adding field 'Repository.url'
db.add_column('sentry_repository', 'url',
self.gf('django.db.models.fields.URLField')(max_length=200, null=True),
keep_default=False)
# Adding field 'Repository.provider'
db.add_column('sentry_repository', 'provider',
self.gf('django.db.models.fields.CharField')(max_length=64, null=True),
keep_default=False)
# Adding field 'Repository.external_id'
db.add_column('sentry_repository', 'external_id',
self.gf('django.db.models.fields.CharField')(max_length=64, null=True),
keep_default=False)
# Adding field 'Repository.config'
db.add_column('sentry_repository', 'config',
self.gf('jsonfield.fields.JSONField')(default={}),
keep_default=False)
# Adding field 'Repository.status'
db.add_column('sentry_repository', 'status',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0, db_index=True),
keep_default=False)
# Adding unique constraint on 'Repository', fields ['organization_id', 'provider', 'external_id']
db.create_unique('sentry_repository', ['organization_id', 'provider', 'external_id'])
def backwards(self, orm):
# Removing unique constraint on 'Repository', fields ['organization_id', 'provider', 'external_id']
db.delete_unique('sentry_repository', ['organization_id', 'provider', 'external_id'])
# Removing unique constraint on 'CommitFileChange', fields ['commit', 'filename']
db.delete_unique('sentry_commitfilechange', ['commit_id', 'filename'])
# Deleting model 'CommitFileChange'
db.delete_table('sentry_commitfilechange')
# Deleting field 'Repository.url'
db.delete_column('sentry_repository', 'url')
# Deleting field 'Repository.provider'
db.delete_column('sentry_repository', 'provider')
# Deleting field 'Repository.external_id'
db.delete_column('sentry_repository', 'external_id')
# Deleting field 'Repository.config'
db.delete_column('sentry_repository', 'config')
# Deleting field 'Repository.status'
db.delete_column('sentry_repository', 'status')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True'}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 11, 29, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'),)", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'nWSQmbINKkiwvRzlFaq4iWFfAr22O7g3'", 'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry']
|
Venturi/oldcms
|
refs/heads/master
|
env/lib/python2.7/site-packages/PIL/ExifTags.py
|
71
|
#
# The Python Imaging Library.
# $Id$
#
# EXIF tags
#
# Copyright (c) 2003 by Secret Labs AB
#
# See the README file for information on usage and redistribution.
#
##
# This module provides constants and clear-text names for various
# well-known EXIF tags.
##
##
# Maps EXIF tags to tag names.
TAGS = {
# possibly incomplete
0x00fe: "NewSubfileType",
0x00ff: "SubfileType",
0x0100: "ImageWidth",
0x0101: "ImageLength",
0x0102: "BitsPerSample",
0x0103: "Compression",
0x0106: "PhotometricInterpretation",
0x0107: "Threshholding",
0x0108: "CellWidth",
0x0109: "CellLength",
0x010a: "FillOrder",
0x010d: "DocumentName",
0x011d: "PageName",
0x010e: "ImageDescription",
0x010f: "Make",
0x0110: "Model",
0x0111: "StripOffsets",
0x0112: "Orientation",
0x0115: "SamplesPerPixel",
0x0116: "RowsPerStrip",
0x0117: "StripByteCounts",
0x0118: "MinSampleValue",
0x0119: "MaxSampleValue",
0x011a: "XResolution",
0x011b: "YResolution",
0x011c: "PlanarConfiguration",
0x0120: "FreeOffsets",
0x0121: "FreeByteCounts",
0x0122: "GrayResponseUnit",
0x0123: "GrayResponseCurve",
0x0128: "ResolutionUnit",
0x012d: "TransferFunction",
0x0131: "Software",
0x0132: "DateTime",
0x013b: "Artist",
0x013c: "HostComputer",
0x013e: "WhitePoint",
0x013f: "PrimaryChromaticities",
0x0140: "ColorMap",
0x0152: "ExtraSamples",
0x0201: "JpegIFOffset",
0x0202: "JpegIFByteCount",
0x0211: "YCbCrCoefficients",
0x0212: "YCbCrSubSampling",
0x0213: "YCbCrPositioning",
0x0214: "ReferenceBlackWhite",
0x1000: "RelatedImageFileFormat",
0x1001: "RelatedImageWidth",
0x1002: "RelatedImageLength",
0x828d: "CFARepeatPatternDim",
0x828e: "CFAPattern",
0x828f: "BatteryLevel",
0x8298: "Copyright",
0x829a: "ExposureTime",
0x829d: "FNumber",
0x8769: "ExifOffset",
0x8773: "InterColorProfile",
0x8822: "ExposureProgram",
0x8824: "SpectralSensitivity",
0x8825: "GPSInfo",
0x8827: "ISOSpeedRatings",
0x8828: "OECF",
0x8829: "Interlace",
0x882a: "TimeZoneOffset",
0x882b: "SelfTimerMode",
0x9000: "ExifVersion",
0x9003: "DateTimeOriginal",
0x9004: "DateTimeDigitized",
0x9101: "ComponentsConfiguration",
0x9102: "CompressedBitsPerPixel",
0x9201: "ShutterSpeedValue",
0x9202: "ApertureValue",
0x9203: "BrightnessValue",
0x9204: "ExposureBiasValue",
0x9205: "MaxApertureValue",
0x9206: "SubjectDistance",
0x9207: "MeteringMode",
0x9208: "LightSource",
0x9209: "Flash",
0x920a: "FocalLength",
0x920b: "FlashEnergy",
0x920c: "SpatialFrequencyResponse",
0x920d: "Noise",
0x9211: "ImageNumber",
0x9212: "SecurityClassification",
0x9213: "ImageHistory",
0x9214: "SubjectLocation",
0x9215: "ExposureIndex",
0x9216: "TIFF/EPStandardID",
0x927c: "MakerNote",
0x9286: "UserComment",
0x9290: "SubsecTime",
0x9291: "SubsecTimeOriginal",
0x9292: "SubsecTimeDigitized",
0xa000: "FlashPixVersion",
0xa001: "ColorSpace",
0xa002: "ExifImageWidth",
0xa003: "ExifImageHeight",
0xa004: "RelatedSoundFile",
0xa005: "ExifInteroperabilityOffset",
0xa20b: "FlashEnergy",
0xa20c: "SpatialFrequencyResponse",
0xa20e: "FocalPlaneXResolution",
0xa20f: "FocalPlaneYResolution",
0xa210: "FocalPlaneResolutionUnit",
0xa214: "SubjectLocation",
0xa215: "ExposureIndex",
0xa217: "SensingMethod",
0xa300: "FileSource",
0xa301: "SceneType",
0xa302: "CFAPattern",
0xa401: "CustomRendered",
0xa402: "ExposureMode",
0xa403: "WhiteBalance",
0xa404: "DigitalZoomRatio",
0xa405: "FocalLengthIn35mmFilm",
0xa406: "SceneCaptureType",
0xa407: "GainControl",
0xa408: "Contrast",
0xa409: "Saturation",
0xa40a: "Sharpness",
0xa40b: "DeviceSettingDescription",
0xa40c: "SubjectDistanceRange",
0xa420: "ImageUniqueID",
0xa430: "CameraOwnerName",
0xa431: "BodySerialNumber",
0xa432: "LensSpecification",
0xa433: "LensMake",
0xa434: "LensModel",
0xa435: "LensSerialNumber",
0xa500: "Gamma",
}
##
# Maps EXIF GPS tags to tag names.
GPSTAGS = {
0: "GPSVersionID",
1: "GPSLatitudeRef",
2: "GPSLatitude",
3: "GPSLongitudeRef",
4: "GPSLongitude",
5: "GPSAltitudeRef",
6: "GPSAltitude",
7: "GPSTimeStamp",
8: "GPSSatellites",
9: "GPSStatus",
10: "GPSMeasureMode",
11: "GPSDOP",
12: "GPSSpeedRef",
13: "GPSSpeed",
14: "GPSTrackRef",
15: "GPSTrack",
16: "GPSImgDirectionRef",
17: "GPSImgDirection",
18: "GPSMapDatum",
19: "GPSDestLatitudeRef",
20: "GPSDestLatitude",
21: "GPSDestLongitudeRef",
22: "GPSDestLongitude",
23: "GPSDestBearingRef",
24: "GPSDestBearing",
25: "GPSDestDistanceRef",
26: "GPSDestDistance",
27: "GPSProcessingMethod",
28: "GPSAreaInformation",
29: "GPSDateStamp",
30: "GPSDifferential",
31: "GPSHPositioningError",
}
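##
# Hedged usage sketch, not part of the original module: TAGS and GPSTAGS only
# map numeric tag IDs to readable names, so callers typically pair them with
# Image._getexif(). The helper below is illustrative; "path" is whatever JPEG
# the caller supplies.
def _example_named_exif(path):
    from PIL import Image
    exif = Image.open(path)._getexif() or {}
    named = dict((TAGS.get(tag, tag), value) for tag, value in exif.items())
    gps_raw = named.get("GPSInfo") or {}
    named["GPSInfo"] = dict((GPSTAGS.get(k, k), v) for k, v in gps_raw.items())
    return named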
|
ashokrajbathu/boabrock
|
refs/heads/master
|
frappe/__init__.py
|
6
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals
from werkzeug.local import Local, release_local
import os, importlib, inspect, logging, json
# public
from frappe.__version__ import __version__
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
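# Hedged illustration, not part of the original file, of the attribute-style
# access _dict layers over a plain dict: missing keys read as None instead of
# raising, and attribute writes store ordinary keys.
def _example_dict_usage():
    d = _dict({"owner": "test@example.com"})
    assert d.owner == "test@example.com"  # read via attribute
    assert d.missing is None              # absent key -> None, not AttributeError
    d.status = "Open"                     # attribute write stores a key
    return d.copy()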
def _(msg):
"""Returns translated string in current lang, if exists."""
if local.lang == "en":
return msg
from frappe.translate import get_full_dict
return get_full_dict(local.lang).get(msg, msg)
def get_lang_dict(fortype, name=None):
"""Returns the translated language dict for the given type and name.
:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
:param name: name of the document for which assets are to be returned."""
if local.lang=="en":
return {}
from frappe.translate import get_dict
return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
"""Guess and set user language for the session. `frappe.local.lang`"""
from frappe.translate import get_user_lang
local.lang = get_user_lang(user)
# local-globals
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
request_method = local("request_method")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None):
"""Initialize frappe for the current site. Reset thread locals `frappe.local`"""
if getattr(local, "initialised", None):
return
if not sites_path:
sites_path = '.'
local.error_log = []
local.message_log = []
local.debug_log = []
local.flags = _dict({
"ran_schedulers": [],
"redirect_location": "",
"in_install_db": False,
"in_install_app": False,
"in_import": False,
"in_test": False,
"mute_messages": False,
"ignore_links": False,
"mute_emails": False,
"has_dataurl": False,
})
local.rollback_observers = []
local.test_objects = {}
local.site = site
local.sites_path = sites_path
local.site_path = os.path.join(sites_path, site)
local.request_method = request.method if request else None
local.request_ip = None
local.response = _dict({"docs":[]})
local.conf = _dict(get_site_config())
local.lang = local.conf.lang or "en"
local.lang_full_dict = None
local.module_app = None
local.app_modules = None
local.system_settings = None
local.user = None
local.user_obj = None
local.session = None
local.role_permissions = {}
local.valid_columns = {}
local.new_doc_templates = {}
local.jenv = None
local.jloader = None
local.cache = {}
setup_module_map()
local.initialised = True
def connect(site=None, db_name=None):
"""Connect to site database instance.
:param site: If site is given, calls `frappe.init`.
:param db_name: Optional. Will use from `site_config.json`."""
from database import Database
if site:
init(site)
local.db = Database(user=db_name or local.conf.db_name)
local.form_dict = _dict()
local.session = _dict()
set_user("Administrator")
def get_site_config(sites_path=None, site_path=None):
"""Returns `site_config.json` combined with `sites/common_site_config.json`.
`site_config` is a set of site wide settings like database name, password, email etc."""
config = {}
sites_path = sites_path or getattr(local, "sites_path", None)
site_path = site_path or getattr(local, "site_path", None)
if sites_path:
common_site_config = os.path.join(sites_path, "common_site_config.json")
if os.path.exists(common_site_config):
config.update(get_file_json(common_site_config))
if site_path:
site_config = os.path.join(site_path, "site_config.json")
if os.path.exists(site_config):
config.update(get_file_json(site_config))
return _dict(config)
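# Hedged illustration with hypothetical values, not part of the original file:
# common_site_config.json supplies site-wide defaults and site_config.json
# overrides them, mirroring the two config.update() calls above.
def _example_config_merge():
    common = {"db_name": "common_db", "mute_emails": 1}   # common_site_config.json
    site = {"db_name": "site1_db"}                        # site_config.json
    merged = _dict(common)
    merged.update(site)
    assert merged.db_name == "site1_db" and merged.mute_emails == 1
    return merged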
def destroy():
"""Closes connection and releases werkzeug local."""
if db:
db.close()
release_local(local)
# cache (redis)
redis_server = None
def cache():
"""Returns memcache connection."""
global redis_server
if not redis_server:
from frappe.utils.redis_wrapper import RedisWrapper
redis_server = RedisWrapper.from_url(conf.get("cache_redis_server") or "redis://localhost:11311")
return redis_server
def get_traceback():
"""Returns error traceback."""
import utils
return utils.get_traceback()
def errprint(msg):
"""Log error. This is sent back as `exc` in response.
:param msg: Message."""
from utils import cstr
if not request or (not "cmd" in local.form_dict):
print cstr(msg)
error_log.append(cstr(msg))
def log(msg):
"""Add to `debug_log`.
:param msg: Message."""
if not request:
if conf.get("logging") or False:
print repr(msg)
from utils import cstr
debug_log.append(cstr(msg))
def msgprint(msg, small=0, raise_exception=0, as_table=False):
"""Print a message to the user (via HTTP response).
Messages are sent in the `__server_messages` property in the
response JSON and shown in a pop-up / modal.
:param msg: Message.
:param small: [optional] Show as a floating message in the footer.
:param raise_exception: [optional] Raise given exception and show message.
:param as_table: [optional] If `msg` is a list of lists, render as HTML table.
"""
from utils import cstr, encode
def _raise_exception():
if raise_exception:
if flags.rollback_on_exception:
db.rollback()
import inspect
if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
raise raise_exception, encode(msg)
else:
raise ValidationError, encode(msg)
if flags.mute_messages:
_raise_exception()
return
if as_table and type(msg) in (list, tuple):
msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'
if flags.print_messages:
print "Message: " + repr(msg)
message_log.append((small and '__small:' or '')+cstr(msg or ''))
_raise_exception()
def throw(msg, exc=ValidationError):
"""Throw execption and show message (`msgprint`).
:param msg: Message.
:param exc: Exception class. Default `frappe.ValidationError`"""
msgprint(msg, raise_exception=exc)
def create_folder(path, with_init=False):
"""Create a folder in the given path and add an `__init__.py` file (optional).
:param path: Folder path.
:param with_init: Create `__init__.py` in the new folder."""
from frappe.utils import touch_file
if not os.path.exists(path):
os.makedirs(path)
if with_init:
touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
"""Set current user.
:param username: **User** name to set as current user."""
local.session.user = username
local.session.sid = username
local.cache = {}
local.form_dict = _dict()
local.jenv = None
local.session.data = _dict()
local.role_permissions = {}
local.new_doc_templates = {}
local.user_obj = None
def get_user():
from frappe.utils.user import User
if not local.user_obj:
local.user_obj = User(local.session.user)
return local.user_obj
def get_roles(username=None):
"""Returns roles of current user."""
if not local.session:
return ["Guest"]
if username:
import frappe.utils.user
return frappe.utils.user.get_roles(username)
else:
return get_user().get_roles()
def get_request_header(key, default=None):
"""Return HTTP request header.
:param key: HTTP header key.
:param default: Default value."""
return request.headers.get(key, default)
def sendmail(recipients=(), sender="", subject="No Subject", message="No Message",
as_markdown=False, bulk=False, reference_doctype=None, reference_name=None,
unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
attachments=None, content=None, doctype=None, name=None, reply_to=None,
cc=(), message_id=None, as_bulk=False, send_after=None):
"""Send email using user's default **Email Account** or global default **Email Account**.
:param recipients: List of recipients.
:param sender: Email sender. Default is current user.
:param subject: Email Subject.
:param message: (or `content`) Email Content.
:param as_markdown: Convert content markdown to HTML.
:param bulk: Send via scheduled email sender **Bulk Email**. Don't send immediately.
:param reference_doctype: (or `doctype`) Append as communication to this DocType.
:param reference_name: (or `name`) Append as communication to this document name.
:param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
:param unsubscribe_params: Unsubscribe parameters to be loaded on the unsubscribe_method [optional] (dict).
:param attachments: List of attachments.
:param reply_to: Reply-To email id.
:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
:param send_after: Send after the given datetime.
"""
if bulk or as_bulk:
import frappe.email.bulk
frappe.email.bulk.send(recipients=recipients, sender=sender,
subject=subject, message=content or message,
reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
attachments=attachments, reply_to=reply_to, cc=cc, message_id=message_id, send_after=send_after)
else:
import frappe.email
if as_markdown:
frappe.email.sendmail_md(recipients, sender=sender,
subject=subject, msg=content or message, attachments=attachments, reply_to=reply_to,
cc=cc, message_id=message_id)
else:
frappe.email.sendmail(recipients, sender=sender,
subject=subject, msg=content or message, attachments=attachments, reply_to=reply_to,
cc=cc, message_id=message_id)
logger = None
whitelisted = []
guest_methods = []
def whitelist(allow_guest=False):
"""
Decorator for whitelisting a function and making it accessible via HTTP.
Standard request will be `/api/method/[path.to.method]`
:param allow_guest: Allow non logged-in user to access this method.
Use as:
@frappe.whitelist()
def myfunc(param1, param2):
pass
"""
def innerfn(fn):
global whitelisted, guest_methods
whitelisted.append(fn)
if allow_guest:
guest_methods.append(fn)
return fn
return innerfn
def only_for(roles):
"""Raise `frappe.PermissionError` if the user does not have any of the given **Roles**.
:param roles: List of roles to check."""
if not isinstance(roles, (tuple, list)):
roles = (roles,)
roles = set(roles)
myroles = set(get_roles())
if not roles.intersection(myroles):
raise PermissionError
def clear_cache(user=None, doctype=None):
"""Clear **User**, **DocType** or global cache.
:param user: If user is given, only user cache is cleared.
:param doctype: If doctype is given, only DocType cache is cleared."""
import frappe.sessions
if doctype:
import frappe.model.meta
frappe.model.meta.clear_cache(doctype)
reset_metadata_version()
elif user:
frappe.sessions.clear_cache(user)
else: # everything
import translate
frappe.sessions.clear_cache()
translate.clear_cache()
reset_metadata_version()
frappe.local.cache = {}
for fn in frappe.get_hooks("clear_cache"):
get_attr(fn)()
frappe.local.role_permissions = {}
def has_permission(doctype, ptype="read", doc=None, user=None, verbose=False):
"""Raises `frappe.PermissionError` if not permitted.
:param doctype: DocType for which permission is to be checked.
:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
:param doc: [optional] Checks User permissions for given doc.
:param user: [optional] Check for given user. Default: current user."""
import frappe.permissions
return frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)
def has_website_permission(doctype, ptype="read", doc=None, user=None, verbose=False):
"""Raises `frappe.PermissionError` if not permitted.
:param doctype: DocType for which permission is to be checked.
:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
:param doc: Checks User permissions for given doc.
:param user: [optional] Check for given user. Default: current user."""
if not user:
user = session.user
hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
if hooks:
for method in hooks:
result = call(get_attr(method), doc=doc, ptype=ptype, user=user, verbose=verbose)
# if even a single permission check is Falsy
if not result:
return False
# else it is Truthy
return True
else:
return False
def is_table(doctype):
"""Returns True if `istable` property (indicating child Table) is set for given DocType."""
def get_tables():
return db.sql_list("select name from tabDocType where ifnull(istable,0)=1")
tables = cache().get_value("is_table", get_tables)
return doctype in tables
def get_precision(doctype, fieldname, currency=None, doc=None):
"""Get precision for a given field"""
from frappe.model.meta import get_field_precision
return get_field_precision(get_meta(doctype).get_field(fieldname), doc, currency)
def generate_hash(txt=None):
"""Generates random hash for given text + current timestamp + random string."""
import hashlib, time
from .utils import random_string
return hashlib.sha224((txt or "") + repr(time.time()) + repr(random_string(8))).hexdigest()
def reset_metadata_version():
"""Reset `metadata_version` (Client (Javascript) build ID) hash."""
v = generate_hash()
cache().set_value("metadata_version", v)
return v
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
"""Returns a new document of the given DocType with defaults set.
:param doctype: DocType of the new document.
:param parent_doc: [optional] add to parent document.
:param parentfield: [optional] add against this `parentfield`."""
from frappe.model.create_new import get_new_doc
return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value):
"""Set document value. Calls `frappe.client.set_value`"""
import frappe.client
return frappe.client.set_value(doctype, docname, fieldname, value)
def get_doc(arg1, arg2=None):
"""Return a `frappe.model.document.Document` object of the given type and name.
:param arg1: DocType name as string **or** document JSON.
:param arg2: [optional] Document name as string.
Examples:
# insert a new document
todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
todo.insert()
# open an existing document
todo = frappe.get_doc("ToDo", "TD0001")
"""
import frappe.model.document
return frappe.model.document.get_doc(arg1, arg2)
def get_last_doc(doctype):
"""Get last created document of this type."""
d = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
if d:
return get_doc(doctype, d[0].name)
else:
raise DoesNotExistError
def get_single(doctype):
"""Return a `frappe.model.document.Document` object of the given Single doctype."""
return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
"""Get `frappe.model.meta.Meta` instance of given doctype name."""
import frappe.model.meta
return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
import frappe.modules
return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
ignore_permissions=False, flags=None):
"""Delete a document. Calls `frappe.model.delete_doc.delete_doc`.
:param doctype: DocType of document to be deleted.
:param name: Name of document to be deleted.
:param force: Allow even if document is linked. Warning: This may lead to data integrity errors.
:param ignore_doctypes: Ignore if child table is one of these.
:param for_reload: Call `before_reload` trigger before deleting.
:param ignore_permissions: Ignore user permissions."""
import frappe.model.delete_doc
frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload,
ignore_permissions, flags)
def delete_doc_if_exists(doctype, name):
"""Delete document if exists."""
if db.exists(doctype, name):
delete_doc(doctype, name)
def reload_doctype(doctype, force=False):
"""Reload DocType from model (`[module]/[doctype]/[name]/[name].json`) files."""
reload_doc(scrub(db.get_value("DocType", doctype, "module")), "doctype", scrub(doctype), force=force)
def reload_doc(module, dt=None, dn=None, force=False):
"""Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.
:param module: Module name.
:param dt: DocType name.
:param dn: Document name.
:param force: Reload even if `modified` timestamp matches.
"""
import frappe.modules
return frappe.modules.reload_doc(module, dt, dn, force=force)
def rename_doc(doctype, old, new, debug=0, force=False, merge=False, ignore_permissions=False):
"""Rename a document. Calls `frappe.model.rename_doc.rename_doc`"""
from frappe.model.rename_doc import rename_doc
return rename_doc(doctype, old, new, force=force, merge=merge, ignore_permissions=ignore_permissions)
def get_module(modulename):
"""Returns a module object for given Python module name using `importlib.import_module`."""
return importlib.import_module(modulename)
def scrub(txt):
"""Returns sluggified string. e.g. `Sales Order` becomes `sales_order`."""
return txt.replace(' ','_').replace('-', '_').lower()
def unscrub(txt):
"""Returns titlified string. e.g. `sales_order` becomes `Sales Order`."""
return txt.replace('_',' ').replace('-', ' ').title()
def get_module_path(module, *joins):
"""Get the path of the given module name.
:param module: Module name.
:param *joins: Join additional path elements using `os.path.join`."""
module = scrub(module)
return get_pymodule_path(local.module_app[module] + "." + module, *joins)
def get_app_path(app_name, *joins):
"""Return path of given app.
:param app: App name.
:param *joins: Join additional path elements using `os.path.join`."""
return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
"""Return path of current site.
:param *joins: Join additional path elements using `os.path.join`."""
return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
"""Return path of given Python module name.
:param modulename: Python module name.
:param *joins: Join additional path elements using `os.path.join`."""
joins = [scrub(part) for part in joins]
return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins)
def get_module_list(app_name):
"""Get list of modules for given all via `app/modules.txt`."""
return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt"))
def get_all_apps(with_frappe=False, with_internal_apps=True, sites_path=None):
"""Get list of all apps via `sites/apps.txt`."""
if not sites_path:
sites_path = local.sites_path
apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)
if with_internal_apps:
apps.extend(get_file_items(os.path.join(local.site_path, "apps.txt")))
if with_frappe:
if "frappe" in apps:
apps.remove("frappe")
apps.insert(0, 'frappe')
return apps
def get_installed_apps(sort=False):
"""Get list of installed apps in current site."""
if getattr(flags, "in_install_db", True):
return []
installed = json.loads(db.get_global("installed_apps") or "[]")
if sort:
installed = [app for app in get_all_apps(True) if app in installed]
return installed
def get_hooks(hook=None, default=None, app_name=None):
"""Get hooks via `app/hooks.py`
:param hook: Name of the hook. Will gather all hooks for this name and return as a list.
:param default: Default if no hook found.
:param app_name: Filter by app."""
def load_app_hooks(app_name=None):
hooks = {}
for app in [app_name] if app_name else get_installed_apps(sort=True):
app = "frappe" if app=="webnotes" else app
try:
app_hooks = get_module(app + ".hooks")
except ImportError:
if local.flags.in_install_app:
# app is not installed while restoring; skip it
continue
raise
for key in dir(app_hooks):
if not key.startswith("_"):
append_hook(hooks, key, getattr(app_hooks, key))
return hooks
def append_hook(target, key, value):
if isinstance(value, dict):
target.setdefault(key, {})
for inkey in value:
append_hook(target[key], inkey, value[inkey])
else:
append_to_list(target, key, value)
def append_to_list(target, key, value):
target.setdefault(key, [])
if not isinstance(value, list):
value = [value]
target[key].extend(value)
if app_name:
hooks = _dict(load_app_hooks(app_name))
else:
hooks = _dict(cache().get_value("app_hooks", load_app_hooks))
if hook:
return hooks.get(hook) or (default if default is not None else [])
else:
return hooks
def setup_module_map():
"""Rebuild map of all modules (internal)."""
_cache = cache()
if conf.db_name:
local.app_modules = _cache.get_value("app_modules")
local.module_app = _cache.get_value("module_app")
if not (local.app_modules and local.module_app):
local.module_app, local.app_modules = {}, {}
for app in get_all_apps(True):
if app=="webnotes": app="frappe"
local.app_modules.setdefault(app, [])
for module in get_module_list(app):
module = scrub(module)
local.module_app[module] = app
local.app_modules[app].append(module)
if conf.db_name:
_cache.set_value("app_modules", local.app_modules)
_cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
"""Returns items from text file as a list. Ignores empty lines."""
import frappe.utils
content = read_file(path, raise_not_found=raise_not_found)
if content:
content = frappe.utils.strip(content)
return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))]
else:
return []
def get_file_json(path):
"""Read a file and return parsed JSON object."""
with open(path, 'r') as f:
return json.load(f)
def read_file(path, raise_not_found=False):
"""Open a file and return its content as Unicode."""
from frappe.utils import cstr
if os.path.exists(path):
with open(path, "r") as f:
return cstr(f.read())
elif raise_not_found:
raise IOError("{} Not Found".format(path))
else:
return None
def get_attr(method_string):
"""Get python method object from its name."""
modulename = '.'.join(method_string.split('.')[:-1])
methodname = method_string.split('.')[-1]
return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
"""Call a function and match arguments."""
if hasattr(fn, 'fnargs'):
fnargs = fn.fnargs
else:
fnargs, varargs, varkw, defaults = inspect.getargspec(fn)
newargs = {}
for a in kwargs:
if (a in fnargs) or varkw:
newargs[a] = kwargs.get(a)
if "flags" in newargs:
del newargs["flags"]
return fn(*args, **newargs)
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
"""Create a new **Property Setter** (for overriding DocType and DocField properties)."""
args = _dict(args)
ps = get_doc({
'doctype': "Property Setter",
'doctype_or_field': args.doctype_or_field or "DocField",
'doc_type': args.doctype,
'field_name': args.fieldname,
'property': args.property,
'value': args.value,
'property_type': args.property_type or "Data",
'__islocal': 1
})
ps.flags.ignore_validate = ignore_validate
ps.flags.validate_fields_for_doctype = validate_fields_for_doctype
ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
"""Import a file using Data Import Tool."""
from frappe.core.page.data_import_tool import data_import_tool
data_import_tool.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
""" No_copy fields also get copied."""
import copy
def remove_no_copy_fields(d):
for df in d.meta.get("fields", {"no_copy": 1}):
if hasattr(d, df.fieldname):
d.set(df.fieldname, None)
if not isinstance(doc, dict):
d = doc.as_dict()
else:
d = doc
newdoc = get_doc(copy.deepcopy(d))
newdoc.name = None
newdoc.set("__islocal", 1)
newdoc.owner = None
newdoc.creation = None
newdoc.amended_from = None
newdoc.amendment_date = None
if not ignore_no_copy:
remove_no_copy_fields(newdoc)
for d in newdoc.get_all_children():
d.name = None
d.parent = None
d.set("__islocal", 1)
d.owner = None
d.creation = None
if not ignore_no_copy:
remove_no_copy_fields(d)
return newdoc
def compare(val1, condition, val2):
"""Compare two values using `frappe.utils.compare`
`condition` could be:
- "^"
- "in"
- "not in"
- "="
- "!="
- ">"
- "<"
- ">="
- "<="
- "not None"
- "None"
"""
import frappe.utils
return frappe.utils.compare(val1, condition, val2)
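# Hedged examples, not part of the original file: expected results for a few
# of the operators listed above, assuming the usual semantics of
# frappe.utils.compare rather than asserting against its implementation.
def _example_compare():
    return [
        compare("Sales Order", "=", "Sales Order"),  # equality
        compare(5, ">", 3),                          # numeric comparison
        compare("ToDo", "in", ["ToDo", "Note"]),     # membership
    ]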
def respond_as_web_page(title, html, success=None, http_status_code=None):
"""Send response as a web page with a message rather than JSON. Used to show permission errors etc.
:param title: Page title and heading.
:param html: HTML message body to be shown.
:param success: Alert message.
:param http_status_code: HTTP status code."""
local.message_title = title
local.message = html
local.message_success = success
local.response['type'] = 'page'
local.response['page_name'] = 'message'
if http_status_code:
local.response['http_status_code'] = http_status_code
def build_match_conditions(doctype, as_condition=True):
"""Return match (User permissions) for given doctype as list or SQL."""
import frappe.desk.reportview
return frappe.desk.reportview.build_match_conditions(doctype, as_condition)
def get_list(doctype, *args, **kwargs):
"""List database query via `frappe.model.db_query`. Will also check for permissions.
:param doctype: DocType on which query is to be made.
:param fields: List of fields or `*`.
:param filters: List of filters (see example).
:param order_by: Order By e.g. `modified desc`.
:param limit_page_start: Start results at record #. Default 0.
:param limit_page_length: Number of records in the page. Default 20.
Example usage:
# simple dict filter
frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})
# filter as a list of lists
frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])
# filter as a list of dicts
frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})
"""
import frappe.model.db_query
return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
"""List database query via `frappe.model.db_query`. Will **not** check for conditions.
Parameters are the same as `frappe.get_list`.
:param doctype: DocType on which query is to be made.
:param fields: List of fields or `*`. Default is: `["name"]`.
:param filters: List of filters (see example).
:param order_by: Order By e.g. `modified desc`.
:param limit_page_start: Start results at record #. Default 0.
:param limit_page_length: Number of records in the page. Default 20.
Example usage:
# simple dict filter
frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})
# filter as a list of lists
frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])
# filter as a list of dicts
frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})
"""
kwargs["ignore_permissions"] = True
if not "limit_page_length" in kwargs:
kwargs["limit_page_length"] = 0
return get_list(doctype, *args, **kwargs)
def add_version(doc):
"""Insert a new **Version** of the given document.
A **Version** is a JSON dump of the current document state."""
get_doc({
"doctype": "Version",
"ref_doctype": doc.doctype,
"docname": doc.name,
"doclist_json": as_json(doc.as_dict())
}).insert(ignore_permissions=True)
def as_json(obj, indent=1):
from frappe.utils.response import json_handler
return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler)
def get_test_records(doctype):
"""Returns list of objects from `test_records.json` in the given doctype's folder."""
from frappe.modules import get_doctype_module, get_module_path
path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json")
if os.path.exists(path):
with open(path, "r") as f:
return json.loads(f.read())
else:
return []
def format_value(value, df, doc=None, currency=None):
"""Format value with given field properties.
:param value: Value to be formatted.
:param df: DocField object with properties `fieldtype`, `options` etc."""
import frappe.utils.formatters
return frappe.utils.formatters.format_value(value, df, doc, currency=currency)
def get_print(doctype, name, print_format=None, style=None, html=None, as_pdf=False):
"""Get Print Format for given document.
:param doctype: DocType of document.
:param name: Name of document.
:param print_format: Print Format name. Default 'Standard'.
:param style: Print Format style.
:param as_pdf: Return as PDF. Default False."""
from frappe.website.render import build_page
from frappe.utils.pdf import get_pdf
local.form_dict.doctype = doctype
local.form_dict.name = name
local.form_dict.format = print_format
local.form_dict.style = style
if not html:
html = build_page("print")
if as_pdf:
return get_pdf(html)
else:
return html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None):
from frappe.utils import scrub_urls
if not file_name: file_name = name
file_name = file_name.replace(' ','').replace('/','-')
print_settings = db.get_singles_dict("Print Settings")
local.flags.ignore_print_permissions = True
if int(print_settings.send_print_as_pdf or 0):
out = {
"fname": file_name + ".pdf",
"fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True)
}
else:
out = {
"fname": file_name + ".html",
"fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html)).encode("utf-8")
}
local.flags.ignore_print_permissions = False
return out
logging_setup_complete = False
def get_logger(module=None):
from frappe.setup_logging import setup_logging
global logging_setup_complete
if not logging_setup_complete:
setup_logging()
logging_setup_complete = True
return logging.getLogger(module or "frappe")
|
opennode/nodeconductor-assembly-waldur
|
refs/heads/develop
|
src/waldur_core/core/migrations/0006_extend_organization.py
|
2
|
# Generated by Django 1.11.20 on 2019-04-15 10:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_user_backend_id'),
]
operations = [
migrations.AlterField(
model_name='user',
name='organization',
field=models.CharField(
blank=True, max_length=255, verbose_name='organization'
),
),
]
|
shuggiefisher/django-on-google-app-engine-base
|
refs/heads/master
|
django/db/backends/postgresql/base.py
|
241
|
"""
PostgreSQL database backend for Django.
Requires psycopg 1: http://initd.org/projects/psycopg1
"""
import sys
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.postgresql.client import DatabaseClient
from django.db.backends.postgresql.creation import DatabaseCreation
from django.db.backends.postgresql.introspection import DatabaseIntrospection
from django.db.backends.postgresql.operations import DatabaseOperations
from django.db.backends.postgresql.version import get_version
from django.utils.encoding import smart_str, smart_unicode
try:
import psycopg as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class UnicodeCursorWrapper(object):
"""
A thin wrapper around psycopg cursors that allows them to accept Unicode
strings as params.
This is necessary because psycopg doesn't apply any DB quoting to
parameters that are Unicode strings. If a param is Unicode, this will
convert it to a bytestring using database client's encoding before passing
it to psycopg.
All results retrieved from the database are converted into Unicode strings
before being returned to the caller.
"""
def __init__(self, cursor, charset):
self.cursor = cursor
self.charset = charset
def format_params(self, params):
if isinstance(params, dict):
result = {}
charset = self.charset
for key, value in params.items():
result[smart_str(key, charset)] = smart_str(value, charset)
return result
else:
return tuple([smart_str(p, self.charset, True) for p in params])
def execute(self, sql, params=()):
try:
return self.cursor.execute(smart_str(sql, self.charset), self.format_params(params))
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, sql, param_list):
try:
new_param_list = [self.format_params(params) for params in param_list]
return self.cursor.executemany(sql, new_param_list)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor.fetchall())
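# Hedged illustration, not part of the backend: format_params() only consults
# the charset, so the unicode-to-bytestring conversion described in the class
# docstring can be exercised without opening a connection. The sample values
# below are hypothetical.
def _example_format_params():
    wrapper = UnicodeCursorWrapper(cursor=None, charset='utf-8')
    return (wrapper.format_params({u'name': u'caf\xe9'}),   # dict-style params
            wrapper.format_params((u'caf\xe9', 42)))        # positional params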
class DatabaseFeatures(BaseDatabaseFeatures):
uses_savepoints = True
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
can_defer_constraint_checks = True
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
import warnings
warnings.warn(
'The "postgresql" backend has been deprecated. Use "postgresql_psycopg2" instead.',
DeprecationWarning
)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _cursor(self):
new_connection = False
set_tz = False
settings_dict = self.settings_dict
if self.connection is None:
new_connection = True
set_tz = settings_dict.get('TIME_ZONE')
if settings_dict['NAME'] == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You need to specify NAME in your Django settings file.")
conn_string = "dbname=%s" % settings_dict['NAME']
if settings_dict['USER']:
conn_string = "user=%s %s" % (settings_dict['USER'], conn_string)
if settings_dict['PASSWORD']:
conn_string += " password='%s'" % settings_dict['PASSWORD']
if settings_dict['HOST']:
conn_string += " host=%s" % settings_dict['HOST']
if settings_dict['PORT']:
conn_string += " port=%s" % settings_dict['PORT']
self.connection = Database.connect(conn_string, **settings_dict['OPTIONS'])
# make transactions transparent to all cursors
self.connection.set_isolation_level(1)
connection_created.send(sender=self.__class__, connection=self)
cursor = self.connection.cursor()
if new_connection:
if set_tz:
cursor.execute("SET TIME ZONE %s", [settings_dict['TIME_ZONE']])
if not hasattr(self, '_version'):
self.__class__._version = get_version(cursor)
if self._version[0:2] < (8, 0):
# No savepoint support for earlier version of PostgreSQL.
self.features.uses_savepoints = False
cursor.execute("SET client_encoding to 'UNICODE'")
return UnicodeCursorWrapper(cursor, 'utf-8')
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
def typecast_string(s):
"""
Cast all returned strings to unicode strings.
"""
if not s and not isinstance(s, str):
return s
return smart_unicode(s)
# Register these custom typecasts, because Django expects dates/times to be
# in Python's native (standard-library) datetime/time format, whereas psycopg
# uses mx.DateTime by default.
try:
Database.register_type(Database.new_type((1082,), "DATE", util.typecast_date))
except AttributeError:
raise Exception("You appear to be using psycopg version 2. Set your DATABASES.ENGINE to 'postgresql_psycopg2' instead of 'postgresql'.")
Database.register_type(Database.new_type((1083,1266), "TIME", util.typecast_time))
Database.register_type(Database.new_type((1114,1184), "TIMESTAMP", util.typecast_timestamp))
Database.register_type(Database.new_type((16,), "BOOLEAN", util.typecast_boolean))
Database.register_type(Database.new_type((1700,), "NUMERIC", util.typecast_decimal))
Database.register_type(Database.new_type(Database.types[1043].values, 'STRING', typecast_string))
|
tartavull/google-cloud-python
|
refs/heads/master
|
monitoring/google/cloud/monitoring/metric.py
|
7
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric Descriptors for the `Google Stackdriver Monitoring API (V3)`_.
.. _Google Stackdriver Monitoring API (V3):
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\
projects.metricDescriptors
"""
import collections
from google.cloud.monitoring.label import LabelDescriptor
class MetricKind(object):
"""Choices for the `kind of measurement`_.
.. _kind of measurement:
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\
projects.metricDescriptors#MetricKind
"""
METRIC_KIND_UNSPECIFIED = 'METRIC_KIND_UNSPECIFIED'
""".. note:: An unspecified kind is not allowed in metric descriptors."""
GAUGE = 'GAUGE'
DELTA = 'DELTA'
CUMULATIVE = 'CUMULATIVE'
class ValueType(object):
"""Choices for the `metric value type`_.
.. _metric value type:
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\
projects.metricDescriptors#ValueType
"""
VALUE_TYPE_UNSPECIFIED = 'VALUE_TYPE_UNSPECIFIED'
""".. note:: An unspecified type is not allowed in metric descriptors."""
BOOL = 'BOOL'
INT64 = 'INT64'
DOUBLE = 'DOUBLE'
STRING = 'STRING'
DISTRIBUTION = 'DISTRIBUTION'
class MetricDescriptor(object):
"""Specification of a metric type and its schema.
The preferred way to construct a metric descriptor object is using the
:meth:`~google.cloud.monitoring.client.Client.metric_descriptor` factory
method of the :class:`~google.cloud.monitoring.client.Client` class.
:type client: :class:`google.cloud.monitoring.client.Client`
:param client: A client for operating on the metric descriptor.
:type type_: str
:param type_:
The metric type including a DNS name prefix. For example:
``"compute.googleapis.com/instance/cpu/utilization"``
:type metric_kind: str
:param metric_kind:
The kind of measurement. It must be one of
:data:`MetricKind.GAUGE`, :data:`MetricKind.DELTA`,
or :data:`MetricKind.CUMULATIVE`. See :class:`MetricKind`.
:type value_type: str
:param value_type:
The value type of the metric. It must be one of
:data:`ValueType.BOOL`, :data:`ValueType.INT64`,
:data:`ValueType.DOUBLE`, :data:`ValueType.STRING`,
or :data:`ValueType.DISTRIBUTION`.
See :class:`ValueType`.
:type labels:
list of :class:`~google.cloud.monitoring.label.LabelDescriptor`
:param labels:
A sequence of zero or more label descriptors specifying the labels
used to identify a specific instance of this metric.
:type unit: str
:param unit: An optional unit in which the metric value is reported.
:type description: str
:param description: An optional detailed description of the metric.
:type display_name: str
:param display_name: An optional concise name for the metric.
:type name: str
:param name:
(Optional) The "resource name" of the metric descriptor. For example:
``"projects/<project_id>/metricDescriptors/<type>"``. As
retrieved from the service, this will always be specified.
You can and should omit it when constructing an instance for
the purpose of creating a new metric descriptor.
"""
def __init__(self, client, type_,
metric_kind=MetricKind.METRIC_KIND_UNSPECIFIED,
value_type=ValueType.VALUE_TYPE_UNSPECIFIED,
labels=(),
unit='', description='', display_name='',
name=None):
self.client = client
self.name = name
self.type = type_
self.labels = labels
self.metric_kind = metric_kind
self.value_type = value_type
self.unit = unit
self.description = description
self.display_name = display_name
def create(self):
"""Create a new metric descriptor based on this object.
Example::
>>> descriptor = client.metric_descriptor(
... 'custom.googleapis.com/my_metric',
... metric_kind=MetricKind.GAUGE,
... value_type=ValueType.DOUBLE,
... description='This is a simple example of a custom metric.')
>>> descriptor.create()
The metric kind must not be :data:`MetricKind.METRIC_KIND_UNSPECIFIED`,
and the value type must not be
:data:`ValueType.VALUE_TYPE_UNSPECIFIED`.
The ``name`` attribute is ignored in preparing the creation request.
All attributes are overwritten by the values received in the response
(normally affecting only ``name``).
"""
path = '/projects/{project}/metricDescriptors/'.format(
project=self.client.project)
response = self.client._connection.api_request(
method='POST', path=path, data=self._to_dict())
self._init_from_dict(response)
def delete(self):
"""Delete the metric descriptor identified by this object.
Example::
>>> descriptor = client.metric_descriptor(
... 'custom.googleapis.com/my_metric')
>>> descriptor.delete()
Only the ``client`` and ``type`` attributes are used.
"""
path = '/projects/{project}/metricDescriptors/{type}'.format(
project=self.client.project,
type=self.type)
self.client._connection.api_request(method='DELETE', path=path)
@classmethod
def _fetch(cls, client, metric_type):
"""Look up a metric descriptor by type.
:type client: :class:`google.cloud.monitoring.client.Client`
:param client: The client to use.
:type metric_type: str
:param metric_type: The metric type name.
:rtype: :class:`MetricDescriptor`
:returns: The metric descriptor instance.
:raises: :class:`google.cloud.exceptions.NotFound` if the metric
descriptor is not found.
"""
path = '/projects/{project}/metricDescriptors/{type}'.format(
project=client.project,
type=metric_type)
info = client._connection.api_request(method='GET', path=path)
return cls._from_dict(client, info)
@classmethod
def _list(cls, client, filter_string=None, type_prefix=None):
"""List all metric descriptors for the project.
:type client: :class:`google.cloud.monitoring.client.Client`
:param client: The client to use.
:type filter_string: str
:param filter_string:
(Optional) Filter expression describing the metric descriptors to
be returned. See the `filter documentation`_.
:type type_prefix: str
:param type_prefix:
(Optional) Prefix constraining the selected metric types. This adds
``metric.type = starts_with("<prefix>")`` to the filter.
:rtype: list of :class:`MetricDescriptor`
:returns: A list of metric descriptor instances.
.. _filter documentation:
https://cloud.google.com/monitoring/api/v3/filters
"""
path = '/projects/{project}/metricDescriptors/'.format(
project=client.project)
filters = []
if filter_string is not None:
filters.append(filter_string)
if type_prefix is not None:
filters.append('metric.type = starts_with("{prefix}")'.format(
prefix=type_prefix))
descriptors = []
page_token = None
while True:
params = {}
if filters:
params['filter'] = ' AND '.join(filters)
if page_token is not None:
params['pageToken'] = page_token
response = client._connection.api_request(
method='GET', path=path, query_params=params)
for info in response.get('metricDescriptors', ()):
descriptors.append(cls._from_dict(client, info))
page_token = response.get('nextPageToken')
if not page_token:
break
return descriptors
@classmethod
def _from_dict(cls, client, info):
"""Construct a metric descriptor from the parsed JSON representation.
:type client: :class:`google.cloud.monitoring.client.Client`
:param client: A client to be included in the returned object.
:type info: dict
:param info:
A ``dict`` parsed from the JSON wire-format representation.
:rtype: :class:`MetricDescriptor`
:returns: A metric descriptor.
"""
descriptor = cls(client, None)
descriptor._init_from_dict(info)
return descriptor
def _init_from_dict(self, info):
"""Initialize attributes from the parsed JSON representation.
:type info: dict
:param info:
A ``dict`` parsed from the JSON wire-format representation.
"""
self.name = info['name']
self.type = info['type']
self.labels = tuple(LabelDescriptor._from_dict(label)
for label in info.get('labels', []))
self.metric_kind = info['metricKind']
self.value_type = info['valueType']
self.unit = info.get('unit', '')
self.description = info.get('description', '')
self.display_name = info.get('displayName', '')
def _to_dict(self):
"""Build a dictionary ready to be serialized to the JSON wire format.
:rtype: dict
:returns: A dictionary.
"""
info = {
'type': self.type,
'metricKind': self.metric_kind,
'valueType': self.value_type,
}
if self.labels:
info['labels'] = [label._to_dict() for label in self.labels]
if self.unit:
info['unit'] = self.unit
if self.description:
info['description'] = self.description
if self.display_name:
info['displayName'] = self.display_name
return info
def __repr__(self):
return (
'<MetricDescriptor:\n'
' name={name!r},\n'
' type={type!r},\n'
' metric_kind={metric_kind!r}, value_type={value_type!r},\n'
' labels={labels!r},\n'
' display_name={display_name!r}, unit={unit!r},\n'
' description={description!r}>'
).format(**self.__dict__)
class Metric(collections.namedtuple('Metric', 'type labels')):
"""A specific metric identified by specifying values for all labels.
The preferred way to construct a metric object is using the
:meth:`~google.cloud.monitoring.client.Client.metric` factory method
of the :class:`~google.cloud.monitoring.client.Client` class.
:type type: str
:param type: The metric type name.
:type labels: dict
:param labels: A mapping from label names to values for all labels
enumerated in the associated :class:`MetricDescriptor`.
"""
__slots__ = ()
@classmethod
def _from_dict(cls, info):
"""Construct a metric object from the parsed JSON representation.
:type info: dict
:param info:
A ``dict`` parsed from the JSON wire-format representation.
:rtype: :class:`Metric`
:returns: A metric object.
"""
return cls(
type=info['type'],
labels=info.get('labels', {}),
)
def _to_dict(self):
"""Build a dictionary ready to be serialized to the JSON format.
:rtype: dict
:returns: A dict representation of the object that can be written to
the API.
"""
return {
'type': self.type,
'labels': self.labels,
}
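# Hedged usage sketch, not part of the module: Metric is a plain namedtuple,
# so identifying a specific time series only requires the metric type and its
# label values. The type and label below are illustrative.
def _example_metric():
    metric = Metric(type='custom.googleapis.com/my_metric',
                    labels={'environment': 'staging'})
    return metric._to_dict()  # {'type': '...', 'labels': {'environment': 'staging'}}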
|
hayd/pyfaker
|
refs/heads/master
|
pyfaker/xeger.py
|
1
|
# Xeger.py from https://bitbucket.org/leapfrogdevelopment/rstr/overview
import re
import string
from random import choice, randint
from itertools import chain
import sys
if sys.version_info[0] >= 3:
unichr = chr
# The * and + characters in a regular expression
# match up to any number of repeats in theory,
# (and actually 65535 repeats in python) but you
# probably don't want that many repeats in your
# generated strings. This sets an upper-bound on
# repeats generated from + and * characters.
STAR_PLUS_LIMIT = 100
class Xeger(object):
"""Inspired by the Java library Xeger: http://code.google.com/p/xeger/
This class adds functionality to Rstr allowing users to generate a
semi-random string from a regular expression."""
def __init__(self):
super(Xeger, self).__init__()
self._cache = dict()
self._categories = {
"category_digit": lambda: self._alphabets['digits'],
"category_not_digit": lambda: self._alphabets['nondigits'],
"category_space": lambda: self._alphabets['whitespace'],
"category_not_space": lambda: self._alphabets['nonwhitespace'],
"category_word": lambda: self._alphabets['word'],
"category_not_word": lambda: self._alphabets['nonword'],
}
self._cases = {"literal": lambda x: unichr(x),
"not_literal": lambda x: choice(
string.printable.replace(unichr(x), '')),
"at": lambda x: '',
"in": lambda x: self._handle_in(x),
"any": lambda x: self.printable(1),
"range": lambda x: [unichr(i) for i in range(x[0], x[1] + 1)],
"category": lambda x: self._categories[x](),
'branch': lambda x: ''.join(self._handle_state(i) for
i in choice(x[1])),
"subpattern": lambda x: self._handle_group(x),
"assert": lambda x: ''.join(self._handle_state(i) for i in x[1]),
"assert_not": lambda x: '',
"groupref": lambda x: self._cache[x],
'max_repeat': lambda x: self._handle_repeat(*x),
'negate': lambda x: [False],
}
def xeger(self, string_or_regex):
try:
pattern = string_or_regex.pattern
except AttributeError:
pattern = string_or_regex
parsed = re.sre_parse.parse(pattern)
result = self._build_string(parsed)
self._cache.clear()
return result
def _build_string(self, parsed):
newstr = []
for state in parsed:
newstr.append(self._handle_state(state))
return ''.join(newstr)
def _handle_state(self, state):
opcode, value = state
return self._cases[opcode](value)
def _handle_group(self, value):
result = ''.join(self._handle_state(i) for i in value[1])
if value[0]:
self._cache[value[0]] = result
return result
def _handle_in(self, value):
candidates = list(chain(*(self._handle_state(i) for
i in value)))
if candidates[0] is False:
candidates = set(string.printable).difference(candidates[1:])
return choice(list(candidates))
else:
return choice(candidates)
def _handle_repeat(self, start_range, end_range, value):
result = []
end_range = min((end_range, STAR_PLUS_LIMIT))
times = randint(start_range, end_range)
for i in range(times):
result.append(''.join(self._handle_state(i) for i in value))
return ''.join(result)
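# Hedged usage sketch, not part of the module, for the Python 2 era this file
# targets (re.sre_parse yields lowercase opcode strings there). Explicit
# character ranges, literals and bounded repeats generate directly; \d-style
# category escapes rely on an _alphabets table this copy does not define, so
# the example sticks to explicit ranges.
def _example_xeger():
    x = Xeger()
    return x.xeger(r'[A-Z]{2}-[0-9]{4}')  # e.g. 'QK-5821'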
|
hellofreedom/ansible-modules-core
|
refs/heads/devel
|
cloud/rackspace/rax_queue.py
|
157
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_queue
short_description: create / delete a queue in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud queue.
version_added: "1.5"
options:
name:
description:
- Name to give the queue
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a Queue
gather_facts: False
hosts: local
connection: local
tasks:
- name: Queue create request
local_action:
module: rax_queue
credentials: ~/.raxpub
name: my-queue
region: DFW
state: present
register: my_queue
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_queue(module, state, name):
for arg in (state, name):
if not arg:
module.fail_json(msg='%s is required for rax_queue' % arg)
changed = False
queues = []
instance = {}
cq = pyrax.queues
if not cq:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
for queue in cq.list():
if name != queue.name:
continue
queues.append(queue)
if len(queues) > 1:
module.fail_json(msg='Multiple Queues were matched by name')
if state == 'present':
if not queues:
try:
queue = cq.create(name)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
queue = queues[0]
instance = dict(name=queue.name)
result = dict(changed=changed, queue=instance)
module.exit_json(**result)
elif state == 'absent':
if queues:
queue = queues[0]
try:
queue.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, queue=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
state = module.params.get('state')
setup_rax_module(module, pyrax)
cloud_queue(module, state, name)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
|
rhinstaller/pyparted
|
refs/heads/master
|
tests/test_parted_device.py
|
3
|
#
# Test cases for the methods in the parted.device module itself
#
# Copyright (C) 2009-2011 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Cantrell <dcantrell@redhat.com>
#
import unittest
from tests.baseclass import RequiresDevice
# One class per method, multiple tests per class. For these simple methods,
# that seems like good organization. More complicated methods may require
# multiple classes and their own test suite.
@unittest.skip("Unimplemented test case.")
class DeviceNewTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceGetSetTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceOpenTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceCloseTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceDestroyTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceRemoveFromCacheTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceBeginExternalAccessTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceEndExternalAccessTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceReadTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceWriteTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceSyncTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceCheckTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceStartSectorToCylinderTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceEndSectorToCylinderTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceStartCylinderToSectorTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceEndCylinderToSectorTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceGetSizeTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
class DeviceGetLengthTestCase(RequiresDevice):
def runTest(self):
self.assertEqual(self.device.getLength(), self.device.length)
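# RequiresDevice (imported from tests.baseclass) is assumed to provision a
# parted.Device as self.device for tests like the one above; the skipped
# classes are placeholders awaiting real implementations.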
@unittest.skip("Unimplemented test case.")
class DeviceGetSizeAsSectorsTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceGetConstraintTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceGetPedDeviceTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
@unittest.skip("Unimplemented test case.")
class DeviceStrTestCase(unittest.TestCase):
def runTest(self):
# TODO
self.fail("Unimplemented test case.")
|
blossomica/airmozilla
|
refs/heads/master
|
airmozilla/main/views/reports.py
|
2
|
import datetime
from django import http
from django.utils import timezone
from django.shortcuts import get_object_or_404, redirect, render
from django.db.models import Count
from django.contrib import messages
from django.db import transaction
from django.contrib.auth.decorators import login_required
from airmozilla.main.models import (
Event,
EventHitStats,
Tag,
EventRevision,
Picture
)
from airmozilla.main.views import is_contributor
from airmozilla.main import forms
def executive_summary(request):
form = forms.ExecutiveSummaryForm(request.GET)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
if form.cleaned_data.get('start'):
start_date = form.cleaned_data['start']
else:
start_date = timezone.now()
start_date -= datetime.timedelta(days=start_date.weekday())
# start date is now a Monday
assert start_date.strftime('%A') == 'Monday'
def make_date_range_title(start):
title = "Week of "
last = start + datetime.timedelta(days=6)
if start.month == last.month:
title += start.strftime('%d ')
else:
title += start.strftime('%d %B ')
title += '- ' + last.strftime('%d %B %Y')
return title
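    # For illustration, make_date_range_title(datetime(2018, 1, 1)) gives
    # "Week of 01 - 07 January 2018", while a week spanning two months gives
    # e.g. "Week of 29 January - 04 February 2018".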
def get_ranges(start):
this_week = start.replace(hour=0, minute=0, second=0)
next_week = this_week + datetime.timedelta(days=7)
yield ("This Week", this_week, next_week)
last_week = this_week - datetime.timedelta(days=7)
yield ("Last Week", last_week, this_week)
# Subtracting 365 days doesn't mean land on a Monday of last
# year so we need to trace back to the nearest Monday
this_week_ly = this_week - datetime.timedelta(days=365)
this_week_ly = (
this_week_ly - datetime.timedelta(days=this_week_ly.weekday())
)
next_week_ly = this_week_ly + datetime.timedelta(days=7)
yield ("This Week Last Year", this_week_ly, next_week_ly)
first_day = this_week.replace(month=1, day=1)
if first_day.year == next_week.year:
yield ("Year to Date", first_day, next_week)
else:
yield ("Year to Date", first_day, this_week)
first_day_ny = first_day.replace(year=first_day.year + 1)
yield ("%s Total" % first_day.year, first_day, first_day_ny)
last_year = first_day.replace(year=first_day.year - 1)
yield ("%s Total" % last_year.year, last_year, first_day)
last_year2 = last_year.replace(year=last_year.year - 1)
yield ("%s Total" % last_year2.year, last_year2, last_year)
last_year3 = last_year2.replace(year=last_year2.year - 1)
yield ("%s Total" % last_year3.year, last_year3, last_year2)
ranges = get_ranges(start_date)
rows = []
for label, start, end in ranges:
events = Event.objects.all().approved().filter(
start_time__gte=start, start_time__lt=end
)
rows.append((
label,
(start, end, end - datetime.timedelta(days=1)),
events.count(),
events.filter(location__name__istartswith='Cyberspace').count(),
events.filter(location__isnull=True).count(),
))
# Now for stats on views, which is done by their archive date
week_from_today = timezone.now() - datetime.timedelta(days=7)
stats = (
EventHitStats.objects
.exclude(event__archive_time__isnull=True)
.filter(
event__archive_time__lt=week_from_today,
)
.exclude(event__channels__exclude_from_trending=True)
.order_by('-score')
.extra(select={
'score': '(featured::int + 1) * total_hits'
'/ extract(days from (now() - archive_time)) ^ 1.8',
})
.select_related('event')
)
prev_start = start_date - datetime.timedelta(days=7)
now = timezone.now()
if (start_date + datetime.timedelta(days=7)) <= now:
next_start = start_date + datetime.timedelta(days=7)
else:
next_start = None
context = {
'date_range_title': make_date_range_title(start_date),
'rows': rows,
'stats': stats[:10],
'prev_start': prev_start,
'next_start': next_start,
}
return render(request, 'main/executive_summary.html', context)
@login_required
def unpicked_pictures(request):
"""returns a report of all events that have pictures in the picture
gallery but none has been picked yet. """
pictures = Picture.objects.filter(event__isnull=False)
events = Event.objects.archived()
assert request.user.is_active
if is_contributor(request.user):
events = events.exclude(privacy=Event.PRIVACY_COMPANY)
events = events.filter(id__in=pictures.values('event'))
events = events.exclude(picture__in=pictures)
count = events.count()
events = events.order_by('?')[:20]
pictures_counts = {}
grouped_pictures = (
Picture.objects
.filter(event__in=events)
.values('event')
.annotate(Count('event'))
)
for each in grouped_pictures:
pictures_counts[each['event']] = each['event__count']
context = {
'count': count,
'events': events,
'pictures_counts': pictures_counts,
}
return render(request, 'main/unpicked_pictures.html', context)
@login_required
@transaction.atomic
def too_few_tags(request):
"""returns a report of all events that very few tags"""
if request.method == 'POST':
form = forms.EventEditTagsForm(request.POST)
if form.is_valid():
event = get_object_or_404(Event, id=form.cleaned_data['event_id'])
assert request.user.is_active
if is_contributor(request.user):
assert event.privacy != Event.PRIVACY_COMPANY
if not EventRevision.objects.filter(event=event).count():
EventRevision.objects.create_from_event(event)
value = set([
x.strip()
for x in form.cleaned_data['tags'].split(',')
if x.strip()
])
prev = set([x.name for x in event.tags.all()])
for tag in prev - value:
tag_obj = Tag.objects.get(name=tag)
event.tags.remove(tag_obj)
added = []
for tag in value - prev:
try:
tag_obj = Tag.objects.get(name__iexact=tag)
except Tag.DoesNotExist:
tag_obj = Tag.objects.create(name=tag)
except Tag.MultipleObjectsReturned:
tag_obj, = Tag.objects.filter(name__iexact=tag)[:1]
event.tags.add(tag_obj)
added.append(tag_obj)
EventRevision.objects.create_from_event(
event,
user=request.user
)
messages.success(
request,
'Thank you for adding: %s' % ', '.join(x.name for x in added)
)
return redirect('main:too_few_tags')
zero_tags = (
Event.objects.scheduled_or_processing()
.exclude(id__in=Event.tags.through.objects.values('event_id'))
)
few_tags = (
Event.tags.through.objects
.filter(event__status=Event.STATUS_SCHEDULED)
.values('event_id')
.annotate(count=Count('event'))
.filter(count__lt=2)
)
assert request.user.is_active
if is_contributor(request.user):
few_tags = few_tags.exclude(event__privacy=Event.PRIVACY_COMPANY)
zero_tags = zero_tags.exclude(privacy=Event.PRIVACY_COMPANY)
count = zero_tags.count()
count += few_tags.count()
try:
event, = zero_tags.order_by('?')[:1]
except ValueError:
event = None
if few_tags.count():
try:
first, = few_tags.order_by('?')[:1]
event = Event.objects.get(id=first['event_id'])
except ValueError:
# there's nothing!
event = None
assert count == 0
context = {
'count': count,
'event': event,
}
if event:
initial = {
'tags': ', '.join(x.name for x in event.tags.all()),
'event_id': event.id,
}
context['form'] = forms.EventEditTagsForm(
initial=initial,
instance=event
)
return render(request, 'main/too_few_tags.html', context)
|
bjornaa/ladim
|
refs/heads/master
|
doc/source/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "LADiM"
author = "Bjørn Ådlandsvik"
copyright = "2018, Institute of Marine Research"
# The short X.Y version
version = "1.1"
# The full version, including alpha/beta/rc tags
release = "1.1.0"
today_fmt = "%Y-%m-%d"
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.mathjax"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
# exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinxdoc"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_logo = "logo.png"
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "LADiMdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '', # Needed for readthedocs
"preamble": "".join(
(
r"\DeclareUnicodeCharacter{F8}{\o}", # ø
r"\DeclareUnicodeCharacter{C5}{\AA}", # Å
)
),
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
"index",
"ladim.tex",
"LADiM Documentation",
r"Bjørn Ådlandsvik <bjorn@imr.no>\\Institute of Marine Research",
"manual",
)
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "ladim", "LADiM Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"LADiM",
"LADiM Documentation",
author,
"LADiM",
"One line description of project.",
"Miscellaneous",
)
]
# -- Extension configuration -------------------------------------------------
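# A typical HTML build with this configuration (output directory is
# illustrative): sphinx-build -b html doc/source doc/build/html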
|
bmenasha/appengine-mapreduce
|
refs/heads/master
|
python/src/mapreduce/handlers.py
|
28
|
#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Defines executor tasks handlers for MapReduce implementation."""
# pylint: disable=protected-access
# pylint: disable=g-bad-name
import datetime
import logging
import math
import os
import random
import sys
import time
import traceback
import zlib
try:
import json
except ImportError:
import simplejson as json
from google.appengine.ext import ndb
from google.appengine import runtime
from google.appengine.api import datastore_errors
from google.appengine.api import logservice
from google.appengine.api import taskqueue
from google.appengine.ext import db
from mapreduce import base_handler
from mapreduce import context
from mapreduce import errors
from mapreduce import input_readers
from mapreduce import map_job_context
from mapreduce import model
from mapreduce import operation
from mapreduce import output_writers
from mapreduce import parameters
from mapreduce import shard_life_cycle
from mapreduce import util
from mapreduce.api import map_job
from google.appengine.runtime import apiproxy_errors
# pylint: disable=g-import-not-at-top
try:
import cloudstorage
  # In 25 runtime, the above code will be scrubbed to import the stub version
  # of cloudstorage. All occurrences of the following if condition in the MR
  # codebase are to tell it apart.
  # TODO(user): Remove after 25 runtime MR is abandoned.
if hasattr(cloudstorage, "_STUB"):
cloudstorage = None
except ImportError:
cloudstorage = None # CloudStorage library not available
# A guide to logging.
# log.critical: messages user absolutely should see, e.g. failed job.
# log.error: exceptions during processing user data, or unexpected
# errors detected by mr framework.
# log.warning: errors mr framework knows how to handle.
# log.info: other expected events.
# Set of strings of various test-injected faults.
_TEST_INJECTED_FAULTS = set()
def _run_task_hook(hooks, method, task, queue_name):
"""Invokes hooks.method(task, queue_name).
Args:
hooks: A hooks.Hooks instance or None.
method: The name of the method to invoke on the hooks class e.g.
"enqueue_kickoff_task".
task: The taskqueue.Task to pass to the hook method.
queue_name: The name of the queue to pass to the hook method.
Returns:
True if the hooks.Hooks instance handled the method, False otherwise.
"""
if hooks is not None:
try:
getattr(hooks, method)(task, queue_name)
except NotImplementedError:
# Use the default task addition implementation.
return False
return True
return False
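# For illustration (hypothetical hooks class): a hooks.Hooks subclass that
# implements e.g. enqueue_worker_task(task, queue_name) takes over task
# addition and _run_task_hook returns True; raising NotImplementedError falls
# back to the default task.add() path and returns False.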
class MapperWorkerCallbackHandler(base_handler.HugeTaskHandler):
"""Callback handler for mapreduce worker task."""
# These directives instruct self.__return() how to set state and enqueue task.
_TASK_DIRECTIVE = util._enum(
# Task is running as expected.
PROCEED_TASK="proceed_task",
      # Need to retry task. Lock was NOT acquired when the error occurred.
# Don't change payload or datastore.
RETRY_TASK="retry_task",
      # Need to retry task. Lock was acquired when the error occurred.
# Don't change payload or datastore.
RETRY_SLICE="retry_slice",
# Drop the task (due to duplicated task). Must log permanent drop.
DROP_TASK="drop_task",
# See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery.
RECOVER_SLICE="recover_slice",
# Need to retry the shard.
RETRY_SHARD="retry_shard",
# Need to drop task and fail the shard. Log permanent failure.
FAIL_TASK="fail_task",
# Need to abort the shard.
ABORT_SHARD="abort_shard")
def __init__(self, *args):
"""Constructor."""
super(MapperWorkerCallbackHandler, self).__init__(*args)
self._time = time.time
self.slice_context = None
self.shard_context = None
def _drop_gracefully(self):
"""Drop worker task gracefully.
Set current shard_state to failed. Controller logic will take care of
other shards and the entire MR.
"""
shard_id = self.request.headers[util._MR_SHARD_ID_TASK_HEADER]
mr_id = self.request.headers[util._MR_ID_TASK_HEADER]
shard_state, mr_state = db.get([
model.ShardState.get_key_by_shard_id(shard_id),
model.MapreduceState.get_key_by_job_id(mr_id)])
if shard_state and shard_state.active:
shard_state.set_for_failure()
config = util.create_datastore_write_config(mr_state.mapreduce_spec)
shard_state.put(config=config)
def _try_acquire_lease(self, shard_state, tstate):
"""Validate datastore and the task payload are consistent.
If so, attempt to get a lease on this slice's execution.
See model.ShardState doc on slice_start_time.
Args:
shard_state: model.ShardState from datastore.
      tstate: model.TransientShardState from taskqueue payload.
Returns:
A _TASK_DIRECTIVE enum. PROCEED_TASK if lock is acquired.
RETRY_TASK if task should be retried, DROP_TASK if task should
      be dropped. Only old tasks (compared to datastore state)
      will be dropped. Future tasks are retried until they naturally
      become old so that the MR never gets stuck.
"""
# Controller will tally shard_states and properly handle the situation.
if not shard_state:
logging.warning("State not found for shard %s; Possible spurious task "
"execution. Dropping this task.",
tstate.shard_id)
return self._TASK_DIRECTIVE.DROP_TASK
if not shard_state.active:
logging.warning("Shard %s is not active. Possible spurious task "
"execution. Dropping this task.", tstate.shard_id)
logging.warning(str(shard_state))
return self._TASK_DIRECTIVE.DROP_TASK
# Validate shard retry count.
if shard_state.retries > tstate.retries:
logging.warning(
"Got shard %s from previous shard retry %s. Possible spurious "
"task execution. Dropping this task.",
tstate.shard_id,
tstate.retries)
logging.warning(str(shard_state))
return self._TASK_DIRECTIVE.DROP_TASK
elif shard_state.retries < tstate.retries:
# By the end of last slice, task enqueue succeeded but datastore commit
# failed. That transaction will be retried and adding the same task
# will pass.
logging.warning(
"ShardState for %s is behind slice. Waiting for it to catch up",
shard_state.shard_id)
return self._TASK_DIRECTIVE.RETRY_TASK
# Validate slice id.
# Taskqueue executes old successful tasks.
if shard_state.slice_id > tstate.slice_id:
logging.warning(
"Task %s-%s is behind ShardState %s. Dropping task.""",
tstate.shard_id, tstate.slice_id, shard_state.slice_id)
return self._TASK_DIRECTIVE.DROP_TASK
# By the end of last slice, task enqueue succeeded but datastore commit
# failed. That transaction will be retried and adding the same task
# will pass. User data is duplicated in this case.
elif shard_state.slice_id < tstate.slice_id:
logging.warning(
"Task %s-%s is ahead of ShardState %s. Waiting for it to catch up.",
tstate.shard_id, tstate.slice_id, shard_state.slice_id)
return self._TASK_DIRECTIVE.RETRY_TASK
# Check potential duplicated tasks for the same slice.
# See model.ShardState doc.
if shard_state.slice_start_time:
countdown = self._wait_time(shard_state,
parameters._LEASE_DURATION_SEC)
if countdown > 0:
logging.warning(
"Last retry of slice %s-%s may be still running."
"Will try again in %s seconds", tstate.shard_id, tstate.slice_id,
countdown)
# TODO(user): There might be a better way. Taskqueue's countdown
# only applies to add new tasks, not retry of tasks.
# Reduce contention.
time.sleep(countdown)
return self._TASK_DIRECTIVE.RETRY_TASK
# lease could have expired. Verify with logs API.
else:
if self._wait_time(shard_state,
parameters._MAX_LEASE_DURATION_SEC):
if not self._has_old_request_ended(shard_state):
logging.warning(
"Last retry of slice %s-%s is still in flight with request_id "
"%s. Will try again later.", tstate.shard_id, tstate.slice_id,
shard_state.slice_request_id)
return self._TASK_DIRECTIVE.RETRY_TASK
else:
logging.warning(
"Last retry of slice %s-%s has no log entry and has"
"timed out after %s seconds",
tstate.shard_id, tstate.slice_id,
parameters._MAX_LEASE_DURATION_SEC)
# Lease expired or slice_start_time not set.
config = util.create_datastore_write_config(tstate.mapreduce_spec)
@db.transactional(retries=5)
def _tx():
"""Use datastore to set slice_start_time to now.
If failed for any reason, raise error to retry the task (hence all
the previous validation code). The task would die naturally eventually.
Raises:
Rollback: If the shard state is missing.
Returns:
A _TASK_DIRECTIVE enum.
"""
fresh_state = model.ShardState.get_by_shard_id(tstate.shard_id)
if not fresh_state:
logging.warning("ShardState missing.")
raise db.Rollback()
if (fresh_state.active and
fresh_state.slice_id == shard_state.slice_id and
fresh_state.slice_start_time == shard_state.slice_start_time):
shard_state.slice_start_time = datetime.datetime.now()
shard_state.slice_request_id = os.environ.get("REQUEST_LOG_ID")
shard_state.acquired_once = True
shard_state.put(config=config)
return self._TASK_DIRECTIVE.PROCEED_TASK
else:
logging.warning(
"Contention on slice %s-%s execution. Will retry again.",
tstate.shard_id, tstate.slice_id)
        # One proposer should win. In case all lose, back off arbitrarily.
time.sleep(random.randrange(1, 5))
return self._TASK_DIRECTIVE.RETRY_TASK
return _tx()
def _has_old_request_ended(self, shard_state):
"""Whether previous slice retry has ended according to Logs API.
Args:
shard_state: shard state.
Returns:
True if the request of previous slice retry has ended. False if it has
not or unknown.
"""
assert shard_state.slice_start_time is not None
assert shard_state.slice_request_id is not None
request_ids = [shard_state.slice_request_id]
logs = None
try:
logs = list(logservice.fetch(request_ids=request_ids))
except (apiproxy_errors.FeatureNotEnabledError,
apiproxy_errors.CapabilityDisabledError) as e:
# Managed VMs do not have access to the logservice API
# See https://groups.google.com/forum/#!topic/app-engine-managed-vms/r8i65uiFW0w
logging.warning("Ignoring exception: %s", e)
if not logs or not logs[0].finished:
return False
return True
def _wait_time(self, shard_state, secs, now=datetime.datetime.now):
"""Time to wait until slice_start_time is secs ago from now.
Args:
shard_state: shard state.
secs: duration in seconds.
now: a func that gets now.
Returns:
      0 if no wait. A positive int in seconds otherwise. Always rounds up.
"""
assert shard_state.slice_start_time is not None
delta = now() - shard_state.slice_start_time
duration = datetime.timedelta(seconds=secs)
if delta < duration:
return util.total_seconds(duration - delta)
else:
return 0
def _try_free_lease(self, shard_state, slice_retry=False):
"""Try to free lease.
A lightweight transaction to update shard_state and unset
slice_start_time to allow the next retry to happen without blocking.
We don't care if this fails or not because the lease will expire
anyway.
Under normal execution, _save_state_and_schedule_next is the exit point.
It updates/saves shard state and schedules the next slice or returns.
Other exit points are:
1. _are_states_consistent: at the beginning of handle, checks
if datastore states and the task are in sync.
If not, raise or return.
2. _attempt_slice_retry: may raise exception to taskqueue.
3. _save_state_and_schedule_next: may raise exception when taskqueue/db
unreachable.
This handler should try to free the lease on every exceptional exit point.
Args:
shard_state: model.ShardState.
slice_retry: whether to count this as a failed slice execution.
"""
@db.transactional
def _tx():
fresh_state = model.ShardState.get_by_shard_id(shard_state.shard_id)
if fresh_state and fresh_state.active:
# Free lease.
fresh_state.slice_start_time = None
fresh_state.slice_request_id = None
if slice_retry:
fresh_state.slice_retries += 1
fresh_state.put()
try:
_tx()
# pylint: disable=broad-except
except Exception, e:
logging.warning(e)
logging.warning(
"Release lock for shard %s failed. Wait for lease to expire.",
shard_state.shard_id)
def _maintain_LC(self, obj, slice_id, last_slice=False, begin_slice=True,
shard_ctx=None, slice_ctx=None):
"""Makes sure shard life cycle interface are respected.
Args:
obj: the obj that may have implemented _ShardLifeCycle.
slice_id: current slice_id
last_slice: whether this is the last slice.
begin_slice: whether this is the beginning or the end of a slice.
shard_ctx: shard ctx for dependency injection. If None, it will be read
from self.
slice_ctx: slice ctx for dependency injection. If None, it will be read
from self.
"""
if obj is None or not isinstance(obj, shard_life_cycle._ShardLifeCycle):
return
shard_context = shard_ctx or self.shard_context
slice_context = slice_ctx or self.slice_context
if begin_slice:
if slice_id == 0:
obj.begin_shard(shard_context)
obj.begin_slice(slice_context)
else:
obj.end_slice(slice_context)
if last_slice:
obj.end_shard(shard_context)
def _lc_start_slice(self, tstate, slice_id):
self._maintain_LC(tstate.output_writer, slice_id)
self._maintain_LC(tstate.input_reader, slice_id)
self._maintain_LC(tstate.handler, slice_id)
def _lc_end_slice(self, tstate, slice_id, last_slice=False):
self._maintain_LC(tstate.handler, slice_id, last_slice=last_slice,
begin_slice=False)
self._maintain_LC(tstate.input_reader, slice_id, last_slice=last_slice,
begin_slice=False)
self._maintain_LC(tstate.output_writer, slice_id, last_slice=last_slice,
begin_slice=False)
def handle(self):
"""Handle request.
This method has to be careful to pass the same ShardState instance to
    its subroutine calls if the calls mutate or read from ShardState.
Note especially that Context instance caches and updates the ShardState
instance.
Returns:
      Sets the HTTP status code and always returns None.
"""
# Reconstruct basic states.
self._start_time = self._time()
shard_id = self.request.headers[util._MR_SHARD_ID_TASK_HEADER]
mr_id = self.request.headers[util._MR_ID_TASK_HEADER]
spec = model.MapreduceSpec._get_mapreduce_spec(mr_id)
shard_state, control = db.get([
model.ShardState.get_key_by_shard_id(shard_id),
model.MapreduceControl.get_key_by_job_id(mr_id),
])
# Set context before any IO code is called.
ctx = context.Context(spec, shard_state,
task_retry_count=self.task_retry_count())
context.Context._set(ctx)
# Unmarshall input reader, output writer, and other transient states.
tstate = model.TransientShardState.from_request(self.request)
# Try acquire a lease on the shard.
if shard_state:
is_this_a_retry = shard_state.acquired_once
task_directive = self._try_acquire_lease(shard_state, tstate)
if task_directive in (self._TASK_DIRECTIVE.RETRY_TASK,
self._TASK_DIRECTIVE.DROP_TASK):
return self.__return(shard_state, tstate, task_directive)
assert task_directive == self._TASK_DIRECTIVE.PROCEED_TASK
# Abort shard if received signal.
if control and control.command == model.MapreduceControl.ABORT:
task_directive = self._TASK_DIRECTIVE.ABORT_SHARD
return self.__return(shard_state, tstate, task_directive)
# Retry shard if user disabled slice retry.
if (is_this_a_retry and
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS <= 1):
task_directive = self._TASK_DIRECTIVE.RETRY_SHARD
return self.__return(shard_state, tstate, task_directive)
# TODO(user): Find a better way to set these per thread configs.
      # E.g. what if the user changes it?
util._set_ndb_cache_policy()
job_config = map_job.JobConfig._to_map_job_config(
spec,
os.environ.get("HTTP_X_APPENGINE_QUEUENAME"))
job_context = map_job_context.JobContext(job_config)
self.shard_context = map_job_context.ShardContext(job_context, shard_state)
self.slice_context = map_job_context.SliceContext(self.shard_context,
shard_state,
tstate)
try:
slice_id = tstate.slice_id
self._lc_start_slice(tstate, slice_id)
if shard_state.is_input_finished():
self._lc_end_slice(tstate, slice_id, last_slice=True)
# Finalize the stream and set status if there's no more input.
if (tstate.output_writer and
isinstance(tstate.output_writer, output_writers.OutputWriter)):
# It's possible that finalization is successful but
# saving state failed. In this case this shard will retry upon
# finalization error.
# TODO(user): make finalize method idempotent!
tstate.output_writer.finalize(ctx, shard_state)
shard_state.set_for_success()
return self.__return(shard_state, tstate, task_directive)
if is_this_a_retry:
task_directive = self._attempt_slice_recovery(shard_state, tstate)
if task_directive != self._TASK_DIRECTIVE.PROCEED_TASK:
return self.__return(shard_state, tstate, task_directive)
last_slice = self._process_inputs(
tstate.input_reader, shard_state, tstate, ctx)
self._lc_end_slice(tstate, slice_id)
ctx.flush()
if last_slice:
# We're done processing data but we still need to finalize the output
# stream. We save this condition in datastore and force a new slice.
# That way if finalize fails no input data will be retried.
shard_state.set_input_finished()
# pylint: disable=broad-except
except Exception, e:
logging.warning("Shard %s got error.", shard_state.shard_id)
logging.error(traceback.format_exc())
# Fail fast.
if type(e) is errors.FailJobError:
logging.error("Got FailJobError.")
task_directive = self._TASK_DIRECTIVE.FAIL_TASK
else:
task_directive = self._TASK_DIRECTIVE.RETRY_SLICE
self.__return(shard_state, tstate, task_directive)
def __return(self, shard_state, tstate, task_directive):
"""Handler should always call this as the last statement."""
task_directive = self._set_state(shard_state, tstate, task_directive)
self._save_state_and_schedule_next(shard_state, tstate, task_directive)
context.Context._set(None)
def _process_inputs(self,
input_reader,
shard_state,
tstate,
ctx):
"""Read inputs, process them, and write out outputs.
This is the core logic of MapReduce. It reads inputs from input reader,
invokes user specified mapper function, and writes output with
output writer. It also updates shard_state accordingly.
e.g. if shard processing is done, set shard_state.active to False.
If errors.FailJobError is caught, it will fail this MR job.
All other exceptions will be logged and raised to taskqueue for retry
until the number of retries exceeds a limit.
Args:
input_reader: input reader.
shard_state: shard state.
tstate: transient shard state.
ctx: mapreduce context.
Returns:
Whether this shard has finished processing all its input split.
"""
processing_limit = self._processing_limit(tstate.mapreduce_spec)
if processing_limit == 0:
return
finished_shard = True
# Input reader may not be an iterator. It is only a container.
iterator = iter(input_reader)
while True:
try:
entity = iterator.next()
except StopIteration:
break
# Reading input got exception. If we assume
# 1. The input reader have done enough retries.
# 2. The input reader can still serialize correctly after this exception.
# 3. The input reader, upon resume, will try to re-read this failed
# record.
# 4. This exception doesn't imply the input reader is permanently stuck.
# we can serialize current slice immediately to avoid duplicated
# outputs.
# TODO(user): Validate these assumptions on all readers. MR should
# also have a way to detect fake forward progress.
if isinstance(entity, db.Model):
shard_state.last_work_item = repr(entity.key())
elif isinstance(entity, ndb.Model):
shard_state.last_work_item = repr(entity.key)
else:
shard_state.last_work_item = repr(entity)[:100]
processing_limit -= 1
if not self._process_datum(
entity, input_reader, ctx, tstate):
finished_shard = False
break
elif processing_limit == 0:
finished_shard = False
break
# Flush context and its pools.
self.slice_context.incr(
context.COUNTER_MAPPER_WALLTIME_MS,
int((self._time() - self._start_time)*1000))
return finished_shard
def _process_datum(self, data, input_reader, ctx, transient_shard_state):
"""Process a single data piece.
Call mapper handler on the data.
Args:
data: a datum to process.
input_reader: input reader.
ctx: mapreduce context
transient_shard_state: transient shard state.
Returns:
True if scan should be continued, False if scan should be stopped.
"""
if data is not input_readers.ALLOW_CHECKPOINT:
self.slice_context.incr(context.COUNTER_MAPPER_CALLS)
handler = transient_shard_state.handler
if isinstance(handler, map_job.Mapper):
handler(self.slice_context, data)
else:
if input_reader.expand_parameters:
result = handler(*data)
else:
result = handler(data)
if util.is_generator(result):
for output in result:
if isinstance(output, operation.Operation):
output(ctx)
else:
output_writer = transient_shard_state.output_writer
if not output_writer:
logging.warning(
"Handler yielded %s, but no output writer is set.", output)
else:
output_writer.write(output)
if self._time() - self._start_time >= parameters.config._SLICE_DURATION_SEC:
return False
return True
def _set_state(self, shard_state, tstate, task_directive):
"""Set shard_state and tstate based on task_directive.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
task_directive: self._TASK_DIRECTIVE for current shard.
Returns:
A _TASK_DIRECTIVE enum.
PROCEED_TASK if task should proceed normally.
RETRY_SHARD if shard should be retried.
RETRY_SLICE if slice should be retried.
      FAIL_TASK if shard should fail.
RECOVER_SLICE if slice should be recovered.
ABORT_SHARD if shard should be aborted.
RETRY_TASK if task should be retried.
DROP_TASK if task should be dropped.
"""
if task_directive in (self._TASK_DIRECTIVE.RETRY_TASK,
self._TASK_DIRECTIVE.DROP_TASK):
return task_directive
if task_directive == self._TASK_DIRECTIVE.ABORT_SHARD:
shard_state.set_for_abort()
return task_directive
if task_directive == self._TASK_DIRECTIVE.PROCEED_TASK:
shard_state.advance_for_next_slice()
tstate.advance_for_next_slice()
return task_directive
if task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE:
tstate.advance_for_next_slice(recovery_slice=True)
shard_state.advance_for_next_slice(recovery_slice=True)
return task_directive
if task_directive == self._TASK_DIRECTIVE.RETRY_SLICE:
task_directive = self._attempt_slice_retry(shard_state, tstate)
if task_directive == self._TASK_DIRECTIVE.RETRY_SHARD:
task_directive = self._attempt_shard_retry(shard_state, tstate)
if task_directive == self._TASK_DIRECTIVE.FAIL_TASK:
shard_state.set_for_failure()
return task_directive
def _save_state_and_schedule_next(self, shard_state, tstate, task_directive):
"""Save state and schedule task.
Save shard state to datastore.
Schedule next slice if needed.
Set HTTP response code.
No modification to any shard_state or tstate.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
task_directive: enum _TASK_DIRECTIVE.
Returns:
The task to retry if applicable.
"""
spec = tstate.mapreduce_spec
if task_directive == self._TASK_DIRECTIVE.DROP_TASK:
return
if task_directive in (self._TASK_DIRECTIVE.RETRY_SLICE,
self._TASK_DIRECTIVE.RETRY_TASK):
# Set HTTP code to 500.
return self.retry_task()
elif task_directive == self._TASK_DIRECTIVE.ABORT_SHARD:
logging.info("Aborting shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
task = None
elif task_directive == self._TASK_DIRECTIVE.FAIL_TASK:
logging.critical("Shard %s failed permanently.", shard_state.shard_id)
task = None
elif task_directive == self._TASK_DIRECTIVE.RETRY_SHARD:
logging.warning("Shard %s is going to be attempted for the %s time.",
shard_state.shard_id,
shard_state.retries + 1)
task = self._state_to_task(tstate, shard_state)
elif task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE:
logging.warning("Shard %s slice %s is being recovered.",
shard_state.shard_id,
shard_state.slice_id)
task = self._state_to_task(tstate, shard_state)
else:
assert task_directive == self._TASK_DIRECTIVE.PROCEED_TASK
countdown = self._get_countdown_for_next_slice(spec)
task = self._state_to_task(tstate, shard_state, countdown=countdown)
# Prepare parameters for db transaction and taskqueue.
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
# For test only.
# TODO(user): Remove this.
"default")
config = util.create_datastore_write_config(spec)
@db.transactional(retries=5)
def _tx():
"""The Transaction helper."""
fresh_shard_state = model.ShardState.get_by_shard_id(tstate.shard_id)
if not fresh_shard_state:
raise db.Rollback()
if (not fresh_shard_state.active or
"worker_active_state_collision" in _TEST_INJECTED_FAULTS):
logging.warning("Shard %s is not active. Possible spurious task "
"execution. Dropping this task.", tstate.shard_id)
logging.warning("Datastore's %s", str(fresh_shard_state))
logging.warning("Slice's %s", str(shard_state))
return
fresh_shard_state.copy_from(shard_state)
fresh_shard_state.put(config=config)
# Add task in the same datastore transaction.
# This way we guarantee taskqueue is never behind datastore states.
# Old tasks will be dropped.
      # Future tasks won't run until datastore state catches up.
if fresh_shard_state.active:
# Not adding task transactionally.
# transactional enqueue requires tasks with no name.
self._add_task(task, spec, queue_name)
try:
_tx()
except (datastore_errors.Error,
taskqueue.Error,
runtime.DeadlineExceededError,
apiproxy_errors.Error), e:
logging.warning(
"Can't transactionally continue shard. "
"Will retry slice %s %s for the %s time.",
tstate.shard_id,
tstate.slice_id,
self.task_retry_count() + 1)
self._try_free_lease(shard_state)
raise e
def _attempt_slice_recovery(self, shard_state, tstate):
"""Recover a slice.
This is run when a slice had been previously attempted and output
may have been written. If an output writer requires slice recovery,
    we run that logic to remove output duplicates. Otherwise we just retry
the slice.
If recovery is needed, then the entire slice will be dedicated
to recovery logic. No data processing will take place. Thus we call
the slice "recovery slice". This is needed for correctness:
An output writer instance can be out of sync from its physical
    medium only when the slice dies after acquiring the shard lock but before
committing shard state to db. The worst failure case is when
shard state failed to commit after the NAMED task for the next slice was
added. Thus, recovery slice has a special logic to increment current
slice_id n to n+2. If the task for n+1 had been added, it will be dropped
because it is behind shard state.
Args:
shard_state: an instance of Model.ShardState.
tstate: an instance of Model.TransientShardState.
Returns:
_TASK_DIRECTIVE.PROCEED_TASK to continue with this retry.
_TASK_DIRECTIVE.RECOVER_SLICE to recover this slice.
The next slice will start at the same input as
this slice but output to a new instance of output writer.
Combining outputs from all writer instances is up to implementation.
"""
mapper_spec = tstate.mapreduce_spec.mapper
if not (tstate.output_writer and
tstate.output_writer._supports_slice_recovery(mapper_spec)):
return self._TASK_DIRECTIVE.PROCEED_TASK
tstate.output_writer = tstate.output_writer._recover(
tstate.mapreduce_spec, shard_state.shard_number,
shard_state.retries + 1)
return self._TASK_DIRECTIVE.RECOVER_SLICE
def _attempt_shard_retry(self, shard_state, tstate):
"""Whether to retry shard.
This method may modify shard_state and tstate to prepare for retry or fail.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
Returns:
A _TASK_DIRECTIVE enum. RETRY_SHARD if shard should be retried.
FAIL_TASK otherwise.
"""
shard_attempts = shard_state.retries + 1
if shard_attempts >= parameters.config.SHARD_MAX_ATTEMPTS:
logging.warning(
"Shard attempt %s exceeded %s max attempts.",
shard_attempts, parameters.config.SHARD_MAX_ATTEMPTS)
return self._TASK_DIRECTIVE.FAIL_TASK
if tstate.output_writer and (
not tstate.output_writer._supports_shard_retry(tstate)):
logging.warning("Output writer %s does not support shard retry.",
tstate.output_writer.__class__.__name__)
return self._TASK_DIRECTIVE.FAIL_TASK
shard_state.reset_for_retry()
logging.warning("Shard %s attempt %s failed with up to %s attempts.",
shard_state.shard_id,
shard_state.retries,
parameters.config.SHARD_MAX_ATTEMPTS)
output_writer = None
if tstate.output_writer:
output_writer = tstate.output_writer.create(
tstate.mapreduce_spec, shard_state.shard_number, shard_attempts + 1)
tstate.reset_for_retry(output_writer)
return self._TASK_DIRECTIVE.RETRY_SHARD
def _attempt_slice_retry(self, shard_state, tstate):
"""Attempt to retry this slice.
This method may modify shard_state and tstate to prepare for retry or fail.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
Returns:
A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.
RETRY_SHARD if shard retry should be attempted.
"""
if (shard_state.slice_retries + 1 <
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS):
logging.warning(
"Slice %s %s failed for the %s of up to %s attempts "
"(%s of %s taskqueue execution attempts). "
"Will retry now.",
tstate.shard_id,
tstate.slice_id,
shard_state.slice_retries + 1,
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS,
self.task_retry_count() + 1,
parameters.config.TASK_MAX_ATTEMPTS)
# Clear info related to current exception. Otherwise, the real
# callstack that includes a frame for this method will show up
# in log.
sys.exc_clear()
self._try_free_lease(shard_state, slice_retry=True)
return self._TASK_DIRECTIVE.RETRY_SLICE
if parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS > 0:
logging.warning("Slice attempt %s exceeded %s max attempts.",
self.task_retry_count() + 1,
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS)
return self._TASK_DIRECTIVE.RETRY_SHARD
@staticmethod
def get_task_name(shard_id, slice_id, retry=0):
"""Compute single worker task name.
Args:
shard_id: shard id.
slice_id: slice id.
retry: current shard retry count.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
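    # For illustration (identifiers are made up): shard "1234567890-0",
    # slice 3, retry 1 produces "appengine-mrshard-1234567890-0-3-retry-1".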
return "appengine-mrshard-%s-%s-retry-%s" % (
shard_id, slice_id, retry)
def _get_countdown_for_next_slice(self, spec):
"""Get countdown for next slice's task.
When user sets processing rate, we set countdown to delay task execution.
Args:
spec: model.MapreduceSpec
Returns:
countdown in int.
"""
countdown = 0
if self._processing_limit(spec) != -1:
countdown = max(
int(parameters.config._SLICE_DURATION_SEC -
(self._time() - self._start_time)), 0)
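      # e.g. if this slice ran for 4 seconds against an (illustrative)
      # 15-second slice budget, the next task is delayed by roughly 11 seconds.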
return countdown
@classmethod
def _state_to_task(cls,
tstate,
shard_state,
eta=None,
countdown=None):
"""Generate task for slice according to current states.
Args:
tstate: An instance of TransientShardState.
shard_state: An instance of ShardState.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
Returns:
A model.HugeTask instance for the slice specified by current states.
"""
base_path = tstate.base_path
task_name = MapperWorkerCallbackHandler.get_task_name(
tstate.shard_id,
tstate.slice_id,
tstate.retries)
headers = util._get_task_headers(tstate.mapreduce_spec.mapreduce_id)
headers[util._MR_SHARD_ID_TASK_HEADER] = tstate.shard_id
worker_task = model.HugeTask(
url=base_path + "/worker_callback/" + tstate.shard_id,
params=tstate.to_dict(),
name=task_name,
eta=eta,
countdown=countdown,
parent=shard_state,
headers=headers)
return worker_task
@classmethod
def _add_task(cls,
worker_task,
mapreduce_spec,
queue_name):
"""Schedule slice scanning by adding it to the task queue.
Args:
worker_task: a model.HugeTask task for slice. This is NOT a taskqueue
task.
mapreduce_spec: an instance of model.MapreduceSpec.
queue_name: Optional queue to run on; uses the current queue of
execution or the default queue if unspecified.
"""
if not _run_task_hook(mapreduce_spec.get_hooks(),
"enqueue_worker_task",
worker_task,
queue_name):
try:
# Not adding transactionally because worker_task has name.
# Named task is not allowed for transactional add.
worker_task.add(queue_name)
except (taskqueue.TombstonedTaskError,
taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r already exists. %s: %s",
worker_task.name,
e.__class__,
e)
def _processing_limit(self, spec):
"""Get the limit on the number of map calls allowed by this slice.
Args:
spec: a Mapreduce spec.
Returns:
The limit as a positive int if specified by user. -1 otherwise.
"""
processing_rate = float(spec.mapper.params.get("processing_rate", 0))
slice_processing_limit = -1
if processing_rate > 0:
slice_processing_limit = int(math.ceil(
parameters.config._SLICE_DURATION_SEC*processing_rate/
int(spec.mapper.shard_count)))
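      # Illustrative numbers: processing_rate=1000, shard_count=8 and a
      # 15-second slice duration give ceil(15 * 1000 / 8) = 1875 calls.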
return slice_processing_limit
# Deprecated. Only used by old test cases.
# TODO(user): clean up tests.
@classmethod
def _schedule_slice(cls,
shard_state,
tstate,
queue_name=None,
eta=None,
countdown=None):
"""Schedule slice scanning by adding it to the task queue.
Args:
shard_state: An instance of ShardState.
tstate: An instance of TransientShardState.
queue_name: Optional queue to run on; uses the current queue of
execution or the default queue if unspecified.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
"""
queue_name = queue_name or os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
"default")
task = cls._state_to_task(tstate, shard_state, eta, countdown)
cls._add_task(task, tstate.mapreduce_spec, queue_name)
class ControllerCallbackHandler(base_handler.HugeTaskHandler):
"""Supervises mapreduce execution.
Is also responsible for gathering execution status from shards together.
This task is "continuously" running by adding itself again to taskqueue if
and only if mapreduce is still active. A mapreduce is active if it has
actively running shards.
"""
def __init__(self, *args):
"""Constructor."""
super(ControllerCallbackHandler, self).__init__(*args)
self._time = time.time
def _drop_gracefully(self):
"""Gracefully drop controller task.
This method is called when decoding controller task payload failed.
Upon this we mark ShardState and MapreduceState as failed so all
tasks can stop.
    Writing to datastore is forced (read-only mode is ignored) because we
    badly want the tasks to stop, and if force_writes were False,
    the job would never have been started.
"""
mr_id = self.request.headers[util._MR_ID_TASK_HEADER]
state = model.MapreduceState.get_by_job_id(mr_id)
if not state or not state.active:
return
state.active = False
state.result_status = model.MapreduceState.RESULT_FAILED
config = util.create_datastore_write_config(state.mapreduce_spec)
puts = []
for ss in model.ShardState.find_all_by_mapreduce_state(state):
if ss.active:
ss.set_for_failure()
puts.append(ss)
# Avoid having too many shard states in memory.
if len(puts) > model.ShardState._MAX_STATES_IN_MEMORY:
db.put(puts, config=config)
puts = []
db.put(puts, config=config)
# Put mr_state only after all shard_states are put.
db.put(state, config=config)
def handle(self):
"""Handle request."""
spec = model.MapreduceSpec.from_json_str(
self.request.get("mapreduce_spec"))
state, control = db.get([
model.MapreduceState.get_key_by_job_id(spec.mapreduce_id),
model.MapreduceControl.get_key_by_job_id(spec.mapreduce_id),
])
if not state:
logging.warning("State not found for MR '%s'; dropping controller task.",
spec.mapreduce_id)
return
if not state.active:
logging.warning(
"MR %r is not active. Looks like spurious controller task execution.",
spec.mapreduce_id)
self._clean_up_mr(spec)
return
shard_states = model.ShardState.find_all_by_mapreduce_state(state)
self._update_state_from_shard_states(state, shard_states, control)
if state.active:
ControllerCallbackHandler.reschedule(
state, spec, self.serial_id() + 1)
def _update_state_from_shard_states(self, state, shard_states, control):
"""Update mr state by examing shard states.
Args:
state: current mapreduce state as MapreduceState.
shard_states: an iterator over shard states.
control: model.MapreduceControl entity.
"""
# Initialize vars.
state.active_shards, state.aborted_shards, state.failed_shards = 0, 0, 0
total_shards = 0
processed_counts = []
processed_status = []
state.counters_map.clear()
# Tally across shard states once.
for s in shard_states:
total_shards += 1
status = 'unknown'
if s.active:
state.active_shards += 1
status = 'running'
if s.result_status == model.ShardState.RESULT_SUCCESS:
status = 'success'
elif s.result_status == model.ShardState.RESULT_ABORTED:
state.aborted_shards += 1
status = 'aborted'
elif s.result_status == model.ShardState.RESULT_FAILED:
state.failed_shards += 1
status = 'failed'
# Update stats in mapreduce state by aggregating stats from shard states.
state.counters_map.add_map(s.counters_map)
processed_counts.append(s.counters_map.get(context.COUNTER_MAPPER_CALLS))
processed_status.append(status)
state.set_processed_counts(processed_counts, processed_status)
state.last_poll_time = datetime.datetime.utcfromtimestamp(self._time())
spec = state.mapreduce_spec
if total_shards != spec.mapper.shard_count:
logging.error("Found %d shard states. Expect %d. "
"Issuing abort command to job '%s'",
total_shards, spec.mapper.shard_count,
spec.mapreduce_id)
# We issue abort command to allow shards to stop themselves.
model.MapreduceControl.abort(spec.mapreduce_id)
# If any shard is active then the mr is active.
# This way, controller won't prematurely stop before all the shards have.
state.active = bool(state.active_shards)
if not control and (state.failed_shards or state.aborted_shards):
# Issue abort command if there are failed shards.
model.MapreduceControl.abort(spec.mapreduce_id)
if not state.active:
# Set final result status derived from shard states.
if state.failed_shards or not total_shards:
state.result_status = model.MapreduceState.RESULT_FAILED
      # It's important that failed shards are checked before aborted shards
# because failed shards will trigger other shards to abort.
elif state.aborted_shards:
state.result_status = model.MapreduceState.RESULT_ABORTED
else:
state.result_status = model.MapreduceState.RESULT_SUCCESS
self._finalize_outputs(spec, state)
self._finalize_job(spec, state)
else:
@db.transactional(retries=5)
def _put_state():
"""The helper for storing the state."""
fresh_state = model.MapreduceState.get_by_job_id(spec.mapreduce_id)
# We don't check anything other than active because we are only
# updating stats. It's OK if they are briefly inconsistent.
if not fresh_state.active:
logging.warning(
"Job %s is not active. Looks like spurious task execution. "
"Dropping controller task.", spec.mapreduce_id)
return
config = util.create_datastore_write_config(spec)
state.put(config=config)
_put_state()
def serial_id(self):
"""Get serial unique identifier of this task from request.
Returns:
serial identifier as int.
"""
return int(self.request.get("serial_id"))
@classmethod
def _finalize_outputs(cls, mapreduce_spec, mapreduce_state):
"""Finalize outputs.
Args:
mapreduce_spec: an instance of MapreduceSpec.
mapreduce_state: an instance of MapreduceState.
"""
# Only finalize the output writers if the job is successful.
if (mapreduce_spec.mapper.output_writer_class() and
mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS):
mapreduce_spec.mapper.output_writer_class().finalize_job(mapreduce_state)
@classmethod
def _finalize_job(cls, mapreduce_spec, mapreduce_state):
"""Finalize job execution.
Invokes done callback and save mapreduce state in a transaction,
and schedule necessary clean ups. This method is idempotent.
Args:
mapreduce_spec: an instance of MapreduceSpec
mapreduce_state: an instance of MapreduceState
"""
config = util.create_datastore_write_config(mapreduce_spec)
queue_name = util.get_queue_name(mapreduce_spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE))
done_callback = mapreduce_spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK)
done_task = None
if done_callback:
done_task = taskqueue.Task(
url=done_callback,
headers=util._get_task_headers(mapreduce_spec.mapreduce_id,
util.CALLBACK_MR_ID_TASK_HEADER),
method=mapreduce_spec.params.get("done_callback_method", "POST"))
@db.transactional(retries=5)
def _put_state():
"""Helper to store state."""
fresh_state = model.MapreduceState.get_by_job_id(
mapreduce_spec.mapreduce_id)
if not fresh_state.active:
logging.warning(
"Job %s is not active. Looks like spurious task execution. "
"Dropping task.", mapreduce_spec.mapreduce_id)
return
mapreduce_state.put(config=config)
# Enqueue done_callback if needed.
if done_task and not _run_task_hook(
mapreduce_spec.get_hooks(),
"enqueue_done_task",
done_task,
queue_name):
done_task.add(queue_name, transactional=True)
_put_state()
logging.info("Final result for job '%s' is '%s'",
mapreduce_spec.mapreduce_id, mapreduce_state.result_status)
cls._clean_up_mr(mapreduce_spec)
@classmethod
def _clean_up_mr(cls, mapreduce_spec):
FinalizeJobHandler.schedule(mapreduce_spec)
@staticmethod
def get_task_name(mapreduce_spec, serial_id):
"""Compute single controller task name.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
return "appengine-mrcontrol-%s-%s" % (
mapreduce_spec.mapreduce_id, serial_id)
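  # Illustrative note, not part of the original source: for a hypothetical
  # mapreduce id "12345" and serial_id 0 the format string above yields
  # "appengine-mrcontrol-12345-0". Because the task is named, a retried
  # controller iteration reuses the same name and the task queue deduplicates
  # it (see the "already exists" handling in reschedule below).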
@staticmethod
def controller_parameters(mapreduce_spec, serial_id):
"""Fill in controller task parameters.
    The returned parameters map is to be used as the task payload, and it
    contains all the data required by the controller to perform its function.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
string->string map of parameters to be used as task payload.
"""
return {"mapreduce_spec": mapreduce_spec.to_json_str(),
"serial_id": str(serial_id)}
@classmethod
def reschedule(cls,
mapreduce_state,
mapreduce_spec,
serial_id,
queue_name=None):
"""Schedule new update status callback task.
Args:
mapreduce_state: mapreduce state as model.MapreduceState
mapreduce_spec: mapreduce specification as MapreduceSpec.
serial_id: id of the invocation as int.
queue_name: The queue to schedule this task on. Will use the current
queue of execution if not supplied.
"""
task_name = ControllerCallbackHandler.get_task_name(
mapreduce_spec, serial_id)
task_params = ControllerCallbackHandler.controller_parameters(
mapreduce_spec, serial_id)
if not queue_name:
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
controller_callback_task = model.HugeTask(
url=(mapreduce_spec.params["base_path"] + "/controller_callback/" +
mapreduce_spec.mapreduce_id),
name=task_name, params=task_params,
countdown=parameters.config._CONTROLLER_PERIOD_SEC,
parent=mapreduce_state,
headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
if not _run_task_hook(mapreduce_spec.get_hooks(),
"enqueue_controller_task",
controller_callback_task,
queue_name):
try:
controller_callback_task.add(queue_name)
except (taskqueue.TombstonedTaskError,
taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
class KickOffJobHandler(base_handler.TaskQueueHandler):
"""Taskqueue handler which kicks off a mapreduce processing.
This handler is idempotent.
Precondition:
The Model.MapreduceState entity for this mr is already created and
saved to datastore by StartJobHandler._start_map.
Request Parameters:
    mapreduce_id: the mapreduce id, as a string.
"""
# Datastore key used to save json serialized input readers.
_SERIALIZED_INPUT_READERS_KEY = "input_readers_for_mr_%s"
def handle(self):
"""Handles kick off request."""
# Get and verify mr state.
mr_id = self.request.get("mapreduce_id")
# Log the mr_id since this is started in an unnamed task
logging.info("Processing kickoff for job %s", mr_id)
state = model.MapreduceState.get_by_job_id(mr_id)
if not self._check_mr_state(state, mr_id):
return
# Create input readers.
readers, serialized_readers_entity = self._get_input_readers(state)
if readers is None:
# We don't have any data. Finish map.
logging.warning("Found no mapper input data to process.")
state.active = False
state.result_status = model.MapreduceState.RESULT_SUCCESS
ControllerCallbackHandler._finalize_job(
state.mapreduce_spec, state)
return False
# Create output writers.
self._setup_output_writer(state)
# Save states and make sure we use the saved input readers for
# subsequent operations.
result = self._save_states(state, serialized_readers_entity)
if result is None:
readers, _ = self._get_input_readers(state)
elif not result:
return
queue_name = self.request.headers.get("X-AppEngine-QueueName")
KickOffJobHandler._schedule_shards(state.mapreduce_spec, readers,
queue_name,
state.mapreduce_spec.params["base_path"],
state)
ControllerCallbackHandler.reschedule(
state, state.mapreduce_spec, serial_id=0, queue_name=queue_name)
def _drop_gracefully(self):
"""See parent."""
mr_id = self.request.get("mapreduce_id")
logging.error("Failed to kick off job %s", mr_id)
state = model.MapreduceState.get_by_job_id(mr_id)
if not self._check_mr_state(state, mr_id):
return
# Issue abort command just in case there are running tasks.
config = util.create_datastore_write_config(state.mapreduce_spec)
model.MapreduceControl.abort(mr_id, config=config)
# Finalize job and invoke callback.
state.active = False
state.result_status = model.MapreduceState.RESULT_FAILED
ControllerCallbackHandler._finalize_job(state.mapreduce_spec, state)
def _get_input_readers(self, state):
"""Get input readers.
Args:
state: a MapreduceState model.
Returns:
A tuple: (a list of input readers, a model._HugeTaskPayload entity).
The payload entity contains the json serialized input readers.
      (None, None) when input reader splitting returned no data to process.
"""
serialized_input_readers_key = (self._SERIALIZED_INPUT_READERS_KEY %
state.key().id_or_name())
serialized_input_readers = model._HugeTaskPayload.get_by_key_name(
serialized_input_readers_key, parent=state)
# Initialize input readers.
input_reader_class = state.mapreduce_spec.mapper.input_reader_class()
split_param = state.mapreduce_spec.mapper
if issubclass(input_reader_class, map_job.InputReader):
split_param = map_job.JobConfig._to_map_job_config(
state.mapreduce_spec,
os.environ.get("HTTP_X_APPENGINE_QUEUENAME"))
if serialized_input_readers is None:
readers = input_reader_class.split_input(split_param)
else:
readers = [input_reader_class.from_json_str(_json) for _json in
json.loads(zlib.decompress(
serialized_input_readers.payload))]
if not readers:
return None, None
# Update state and spec with actual shard count.
state.mapreduce_spec.mapper.shard_count = len(readers)
state.active_shards = len(readers)
# Prepare to save serialized input readers.
if serialized_input_readers is None:
# Use mr_state as parent so it can be easily cleaned up later.
serialized_input_readers = model._HugeTaskPayload(
key_name=serialized_input_readers_key, parent=state)
readers_json_str = [i.to_json_str() for i in readers]
serialized_input_readers.payload = zlib.compress(json.dumps(
readers_json_str))
return readers, serialized_input_readers
def _setup_output_writer(self, state):
if not state.writer_state:
output_writer_class = state.mapreduce_spec.mapper.output_writer_class()
if output_writer_class:
output_writer_class.init_job(state)
@db.transactional
def _save_states(self, state, serialized_readers_entity):
"""Run transaction to save state.
Args:
state: a model.MapreduceState entity.
serialized_readers_entity: a model._HugeTaskPayload entity containing
json serialized input readers.
Returns:
False if a fatal error is encountered and this task should be dropped
immediately. True if transaction is successful. None if a previous
attempt of this same transaction has already succeeded.
"""
mr_id = state.key().id_or_name()
fresh_state = model.MapreduceState.get_by_job_id(mr_id)
if not self._check_mr_state(fresh_state, mr_id):
return False
if fresh_state.active_shards != 0:
logging.warning(
"Mapreduce %s already has active shards. Looks like spurious task "
"execution.", mr_id)
return None
config = util.create_datastore_write_config(state.mapreduce_spec)
db.put([state, serialized_readers_entity], config=config)
return True
@classmethod
def _schedule_shards(cls,
spec,
readers,
queue_name,
base_path,
mr_state):
"""Prepares shard states and schedules their execution.
    Even though this method does not schedule shard tasks and save shard
    states transactionally, it's safe for taskqueue to retry this logic
    because the initial shard_state for each shard is the same from any retry.
This is an important yet reasonable assumption on model.ShardState.
Args:
spec: mapreduce specification as MapreduceSpec.
readers: list of InputReaders describing shard splits.
queue_name: The queue to run this job on.
base_path: The base url path of mapreduce callbacks.
mr_state: The MapReduceState of current job.
"""
# Create shard states.
shard_states = []
for shard_number, input_reader in enumerate(readers):
shard_state = model.ShardState.create_new(spec.mapreduce_id, shard_number)
shard_state.shard_description = str(input_reader)
shard_states.append(shard_state)
# Retrieves already existing shard states.
existing_shard_states = db.get(shard.key() for shard in shard_states)
existing_shard_keys = set(shard.key() for shard in existing_shard_states
if shard is not None)
# Save non existent shard states.
# Note: we could do this transactionally if necessary.
db.put((shard for shard in shard_states
if shard.key() not in existing_shard_keys),
config=util.create_datastore_write_config(spec))
# Create output writers.
writer_class = spec.mapper.output_writer_class()
writers = [None] * len(readers)
if writer_class:
for shard_number, shard_state in enumerate(shard_states):
writers[shard_number] = writer_class.create(
mr_state.mapreduce_spec,
shard_state.shard_number, shard_state.retries + 1,
mr_state.writer_state)
# Schedule ALL shard tasks.
# Since each task is named, _add_task will fall back gracefully if a
# task already exists.
for shard_number, (input_reader, output_writer) in enumerate(
zip(readers, writers)):
shard_id = model.ShardState.shard_id_from_number(
spec.mapreduce_id, shard_number)
task = MapperWorkerCallbackHandler._state_to_task(
model.TransientShardState(
base_path, spec, shard_id, 0, input_reader, input_reader,
output_writer=output_writer,
handler=spec.mapper.handler),
shard_states[shard_number])
MapperWorkerCallbackHandler._add_task(task,
spec,
queue_name)
@classmethod
def _check_mr_state(cls, state, mr_id):
"""Check MapreduceState.
Args:
      state: a MapreduceState instance.
mr_id: mapreduce id.
Returns:
True if state is valid. False if not and this task should be dropped.
"""
if state is None:
logging.warning(
"Mapreduce State for job %s is missing. Dropping Task.",
mr_id)
return False
if not state.active:
logging.warning(
"Mapreduce %s is not active. Looks like spurious task "
"execution. Dropping Task.", mr_id)
return False
return True
class StartJobHandler(base_handler.PostJsonHandler):
"""Command handler starts a mapreduce job.
This handler allows user to start a mr via a web form. It's _start_map
method can also be used independently to start a mapreduce.
"""
def handle(self):
"""Handles start request."""
# Mapper spec as form arguments.
mapreduce_name = self._get_required_param("name")
mapper_input_reader_spec = self._get_required_param("mapper_input_reader")
mapper_handler_spec = self._get_required_param("mapper_handler")
mapper_output_writer_spec = self.request.get("mapper_output_writer")
mapper_params = self._get_params(
"mapper_params_validator", "mapper_params.")
params = self._get_params(
"params_validator", "params.")
# Default values.
mr_params = map_job.JobConfig._get_default_mr_params()
mr_params.update(params)
if "queue_name" in mapper_params:
mr_params["queue_name"] = mapper_params["queue_name"]
# Set some mapper param defaults if not present.
mapper_params["processing_rate"] = int(mapper_params.get(
"processing_rate") or parameters.config.PROCESSING_RATE_PER_SEC)
# Validate the Mapper spec, handler, and input reader.
mapper_spec = model.MapperSpec(
mapper_handler_spec,
mapper_input_reader_spec,
mapper_params,
int(mapper_params.get("shard_count", parameters.config.SHARD_COUNT)),
output_writer_spec=mapper_output_writer_spec)
mapreduce_id = self._start_map(
mapreduce_name,
mapper_spec,
mr_params,
queue_name=mr_params["queue_name"],
_app=mapper_params.get("_app"))
self.json_response["mapreduce_id"] = mapreduce_id
def _get_params(self, validator_parameter, name_prefix):
"""Retrieves additional user-supplied params for the job and validates them.
Args:
validator_parameter: name of the request parameter which supplies
validator for this parameter set.
name_prefix: common prefix for all parameter names in the request.
Raises:
Any exception raised by the 'params_validator' request parameter if
the params fail to validate.
Returns:
The user parameters.
"""
params_validator = self.request.get(validator_parameter)
user_params = {}
for key in self.request.arguments():
if key.startswith(name_prefix):
values = self.request.get_all(key)
adjusted_key = key[len(name_prefix):]
if len(values) == 1:
user_params[adjusted_key] = values[0]
else:
user_params[adjusted_key] = values
if params_validator:
resolved_validator = util.for_name(params_validator)
resolved_validator(user_params)
return user_params
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
errors.NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise errors.NotEnoughArgumentsError(param_name + " not specified")
return value
@classmethod
def _start_map(cls,
name,
mapper_spec,
mapreduce_params,
queue_name,
eta=None,
countdown=None,
hooks_class_name=None,
_app=None,
in_xg_transaction=False):
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
"""See control.start_map.
Requirements for this method:
1. The request that invokes this method can either be regular or
from taskqueue. So taskqueue specific headers can not be used.
2. Each invocation transactionally starts an isolated mapreduce job with
a unique id. MapreduceState should be immediately available after
returning. See control.start_map's doc on transactional.
3. Method should be lightweight.
"""
# Validate input reader.
mapper_input_reader_class = mapper_spec.input_reader_class()
mapper_input_reader_class.validate(mapper_spec)
# Validate output writer.
mapper_output_writer_class = mapper_spec.output_writer_class()
if mapper_output_writer_class:
mapper_output_writer_class.validate(mapper_spec)
# Create a new id and mr spec.
mapreduce_id = model.MapreduceState.new_mapreduce_id()
mapreduce_spec = model.MapreduceSpec(
name,
mapreduce_id,
mapper_spec.to_json(),
mapreduce_params,
hooks_class_name)
# Validate mapper handler.
ctx = context.Context(mapreduce_spec, None)
context.Context._set(ctx)
try:
# pylint: disable=pointless-statement
mapper_spec.handler
finally:
context.Context._set(None)
# Save states and enqueue task.
if in_xg_transaction:
propagation = db.MANDATORY
else:
propagation = db.INDEPENDENT
@db.transactional(propagation=propagation)
def _txn():
cls._create_and_save_state(mapreduce_spec, _app)
cls._add_kickoff_task(mapreduce_params["base_path"], mapreduce_spec, eta,
countdown, queue_name)
_txn()
return mapreduce_id
@classmethod
def _create_and_save_state(cls, mapreduce_spec, _app):
"""Save mapreduce state to datastore.
Save state to datastore so that UI can see it immediately.
Args:
mapreduce_spec: model.MapreduceSpec,
_app: app id if specified. None otherwise.
Returns:
The saved Mapreduce state.
"""
state = model.MapreduceState.create_new(mapreduce_spec.mapreduce_id)
state.mapreduce_spec = mapreduce_spec
state.active = True
state.active_shards = 0
if _app:
state.app_id = _app
config = util.create_datastore_write_config(mapreduce_spec)
state.put(config=config)
return state
@classmethod
def _add_kickoff_task(cls,
base_path,
mapreduce_spec,
eta,
countdown,
queue_name):
"""Enqueues a new kickoff task."""
params = {"mapreduce_id": mapreduce_spec.mapreduce_id}
# Task is not named so that it can be added within a transaction.
kickoff_task = taskqueue.Task(
url=base_path + "/kickoffjob_callback/" + mapreduce_spec.mapreduce_id,
headers=util._get_task_headers(mapreduce_spec.mapreduce_id),
params=params,
eta=eta,
countdown=countdown)
hooks = mapreduce_spec.get_hooks()
if hooks is not None:
try:
hooks.enqueue_kickoff_task(kickoff_task, queue_name)
return
except NotImplementedError:
pass
kickoff_task.add(queue_name, transactional=True)
class FinalizeJobHandler(base_handler.TaskQueueHandler):
"""Finalize map job by deleting all temporary entities."""
def handle(self):
mapreduce_id = self.request.get("mapreduce_id")
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
if mapreduce_state:
config = (
util.create_datastore_write_config(mapreduce_state.mapreduce_spec))
keys = [model.MapreduceControl.get_key_by_job_id(mapreduce_id)]
for ss in model.ShardState.find_all_by_mapreduce_state(mapreduce_state):
keys.extend(list(
model._HugeTaskPayload.all().ancestor(ss).run(keys_only=True)))
keys.extend(list(model._HugeTaskPayload.all().ancestor(
mapreduce_state).run(keys_only=True)))
db.delete(keys, config=config)
@classmethod
def schedule(cls, mapreduce_spec):
"""Schedule finalize task.
Args:
mapreduce_spec: mapreduce specification as MapreduceSpec.
"""
task_name = mapreduce_spec.mapreduce_id + "-finalize"
finalize_task = taskqueue.Task(
name=task_name,
url=(mapreduce_spec.params["base_path"] + "/finalizejob_callback/" +
mapreduce_spec.mapreduce_id),
params={"mapreduce_id": mapreduce_spec.mapreduce_id},
headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
queue_name = util.get_queue_name(None)
if not _run_task_hook(mapreduce_spec.get_hooks(),
"enqueue_controller_task",
finalize_task,
queue_name):
try:
finalize_task.add(queue_name)
except (taskqueue.TombstonedTaskError,
taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r already exists. %s: %s",
task_name, e.__class__, e)
class CleanUpJobHandler(base_handler.PostJsonHandler):
"""Command to kick off tasks to clean up a job's data."""
def handle(self):
mapreduce_id = self.request.get("mapreduce_id")
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
if mapreduce_state:
shard_keys = model.ShardState.calculate_keys_by_mapreduce_state(
mapreduce_state)
db.delete(shard_keys)
db.delete(mapreduce_state)
self.json_response["status"] = ("Job %s successfully cleaned up." %
mapreduce_id)
class AbortJobHandler(base_handler.PostJsonHandler):
"""Command to abort a running job."""
def handle(self):
model.MapreduceControl.abort(self.request.get("mapreduce_id"))
self.json_response["status"] = "Abort signal sent."
|
TNT-Samuel/Coding-Projects
|
refs/heads/master
|
DNS Server/Source - Copy/Lib/site-packages/pkg_resources/_vendor/packaging/version.py
|
1151
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
    # epoch greater than or equal to 0. This will effectively put the
    # LegacyVersion, which uses the de facto standard originally implemented
    # by setuptools, before all PEP 440 versions.
epoch = -1
    # This scheme is taken from pkg_resources.parse_version of setuptools,
    # prior to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the
    # now-leading zeros until we come to something non-zero, then re-reverse
    # the rest back into the correct order, make it a tuple, and use that for
    # our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
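# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the vendored module: a small self-check
# showing how parse() falls back from Version to LegacyVersion and how
# PEP 440 ordering treats dev/pre releases. The helper name and the sample
# version strings are arbitrary; nothing above calls this function.
def _illustrative_selftest():  # pragma: no cover
    # A PEP 440 compliant string yields a Version ...
    assert isinstance(parse("1.0.3"), Version)
    # ... while a non-compliant one falls back to LegacyVersion.
    assert isinstance(parse("french toast"), LegacyVersion)
    # Development and pre-releases sort before the corresponding final release.
    assert parse("1.0.dev1") < parse("1.0a1") < parse("1.0rc1") < parse("1.0")
    # LegacyVersion hardcodes epoch -1, so it sorts before any PEP 440 version.
    assert parse("french toast") < parse("0.1")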
|
xAKLx/pox
|
refs/heads/carp
|
pox/datapaths/pcap_switch.py
|
42
|
# Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Software switch with PCap ports
Example:
./pox.py --no-openflow datapaths.pcap_switch --address=localhost
"""
from pox.core import core
from pox.datapaths import do_launch
from pox.datapaths.switch import SoftwareSwitchBase, OFConnection
from pox.datapaths.switch import ExpireMixin
import pox.lib.pxpcap as pxpcap
from Queue import Queue
from threading import Thread
import pox.openflow.libopenflow_01 as of
from pox.lib.packet import ethernet
import logging
log = core.getLogger()
DEFAULT_CTL_PORT = 7791
_switches = {}
def _do_ctl (event):
r = _do_ctl2(event)
if r is None:
r = "Okay."
event.worker.send(r + "\n")
def _do_ctl2 (event):
def errf (msg, *args):
raise RuntimeError(msg % args)
args = event.args
def ra (low, high = None):
if high is None: high = low
if len(args) < low or len(args) > high:
raise RuntimeError("Wrong number of arguments")
return False
try:
if event.first == "add-port":
ra(1,2)
if len(event.args) == 1 and len(_switches) == 1:
sw = _switches[_switches.keys()[0]]
p = args[0]
else:
ra(2)
if event.args[0] not in _switches:
raise RuntimeError("No such switch")
sw = _switches[event.args[0]]
p = args[1]
sw.add_interface(p, start=True, on_error=errf)
elif event.first == "del-port":
ra(1,2)
if len(event.args) == 1:
for sw in _switches.values():
for p in sw.ports:
if p.name == event.args[0]:
sw.remove_interface(event.args[0])
return
raise RuntimeError("No such interface")
sw = _switches[event.args[0]]
sw.remove_interface(args[1])
elif event.first == "show":
ra(0)
s = []
for sw in _switches.values():
s.append("Switch %s" % (sw.name,))
for no,p in sw.ports.iteritems():
s.append(" %3s %s" % (no, p.name))
return "\n".join(s)
else:
raise RuntimeError("Unknown command")
except Exception as e:
log.exception("While processing command")
return "Error: " + str(e)
def launch (address = '127.0.0.1', port = 6633, max_retry_delay = 16,
dpid = None, ports = '', extra = None, ctl_port = None,
__INSTANCE__ = None):
"""
Launches a switch
"""
if not pxpcap.enabled:
raise RuntimeError("You need PXPCap to use this component")
if ctl_port:
if core.hasComponent('ctld'):
raise RuntimeError("Only one ctl_port is allowed")
if ctl_port is True:
ctl_port = DEFAULT_CTL_PORT
import ctl
ctl.server(ctl_port)
core.ctld.addListenerByName("CommandEvent", _do_ctl)
_ports = ports.strip()
def up (event):
ports = [p for p in _ports.split(",") if p]
sw = do_launch(PCapSwitch, address, port, max_retry_delay, dpid,
ports=ports, extra_args=extra)
_switches[sw.name] = sw
core.addListenerByName("UpEvent", up)
class PCapSwitch (ExpireMixin, SoftwareSwitchBase):
# Default level for loggers of this class
default_log_level = logging.INFO
def __init__ (self, **kw):
"""
Create a switch instance
Additional options over superclass:
log_level (default to default_log_level) is level for this instance
ports is a list of interface names
"""
log_level = kw.pop('log_level', self.default_log_level)
self.q = Queue()
self.t = Thread(target=self._consumer_threadproc)
core.addListeners(self)
ports = kw.pop('ports', [])
kw['ports'] = []
super(PCapSwitch,self).__init__(**kw)
self._next_port = 1
self.px = {}
for p in ports:
self.add_interface(p, start=False)
self.log.setLevel(log_level)
for px in self.px.itervalues():
px.start()
self.t.start()
def add_interface (self, name, port_no=-1, on_error=None, start=False):
if on_error is None:
on_error = log.error
devs = pxpcap.PCap.get_devices()
if name not in devs:
on_error("Device %s not available -- ignoring", name)
return
dev = devs[name]
if dev.get('addrs',{}).get('ethernet',{}).get('addr') is None:
on_error("Device %s has no ethernet address -- ignoring", name)
return
if dev.get('addrs',{}).get('AF_INET') != None:
on_error("Device %s has an IP address -- ignoring", name)
return
for no,p in self.px.iteritems():
if p.device == name:
on_error("Device %s already added", name)
if port_no == -1:
while True:
port_no = self._next_port
self._next_port += 1
if port_no not in self.ports: break
if port_no in self.ports:
on_error("Port %s already exists -- ignoring", port_no)
return
phy = of.ofp_phy_port()
phy.port_no = port_no
phy.hw_addr = dev['addrs']['ethernet']['addr']
phy.name = name
# Fill in features sort of arbitrarily
phy.curr = of.OFPPF_10MB_HD
phy.advertised = of.OFPPF_10MB_HD
phy.supported = of.OFPPF_10MB_HD
phy.peer = of.OFPPF_10MB_HD
self.add_port(phy)
px = pxpcap.PCap(name, callback = self._pcap_rx, start = False)
px.port_no = phy.port_no
self.px[phy.port_no] = px
if start:
px.start()
return px
def remove_interface (self, name_or_num):
if isinstance(name_or_num, basestring):
for no,p in self.px.iteritems():
if p.device == name_or_num:
self.remove_interface(no)
return
raise ValueError("No such interface")
px = self.px[name_or_num]
px.stop()
px.port_no = None
self.delete_port(name_or_num)
def _handle_GoingDownEvent (self, event):
self.q.put(None)
def _consumer_threadproc (self):
timeout = 3
while core.running:
try:
data = self.q.get(timeout=timeout)
except:
continue
if data is None:
# Signal to quit
break
batch = []
while True:
self.q.task_done()
port_no,data = data
data = ethernet(data)
batch.append((data,port_no))
try:
data = self.q.get(block=False)
except:
break
core.callLater(self.rx_batch, batch)
def rx_batch (self, batch):
for data,port_no in batch:
self.rx_packet(data, port_no)
def _pcap_rx (self, px, data, sec, usec, length):
if px.port_no is None: return
self.q.put((px.port_no, data))
def _output_packet_physical (self, packet, port_no):
"""
send a packet out a single physical port
This is called by the more general _output_packet().
"""
px = self.px.get(port_no)
if not px: return
px.inject(packet)
|
pbanaszkiewicz/amy
|
refs/heads/develop
|
amy/workshops/management/commands/instructors_activity.py
|
1
|
import logging
import os
from django.core.mail import send_mail
from django.core.management.base import BaseCommand
from django.template.loader import get_template
from workshops.models import Badge, Person, Role
logger = logging.getLogger()
class Command(BaseCommand):
help = "Report instructors activity."
def add_arguments(self, parser):
parser.add_argument(
"--send-out-for-real",
action="store_true",
default=False,
help="Send information to the instructors.",
)
parser.add_argument(
"--no-may-contact-only",
action="store_true",
default=False,
help="Include instructors not willing to be contacted.",
)
parser.add_argument(
"--django-mailing",
action="store_true",
default=False,
help="Use Django mailing system. This requires some environmental "
"variables to be set, see `settings.py`.",
)
parser.add_argument(
"-s",
"--sender",
action="store",
default="workshops@carpentries.org",
help='E-mail used in "from:" field.',
)
def foreign_tasks(self, tasks, person, roles):
"""List of other instructors' tasks, per event."""
return [
task.event.task_set.filter(role__in=roles)
.exclude(person=person)
.select_related("person")
for task in tasks
]
def fetch_activity(self, may_contact_only=True):
roles = Role.objects.filter(name__in=["instructor", "helper"])
instructor_badges = Badge.objects.instructor_badges()
instructors = Person.objects.filter(badges__in=instructor_badges)
instructors = instructors.exclude(email__isnull=True)
if may_contact_only:
instructors = instructors.exclude(may_contact=False)
# let's get some things faster
instructors = instructors.select_related("airport").prefetch_related(
"task_set", "lessons", "award_set", "badges"
)
# don't repeat the records
instructors = instructors.distinct()
result = []
for person in instructors:
tasks = person.task_set.filter(role__in=roles).select_related(
"event", "role"
)
record = {
"person": person,
"lessons": person.lessons.all(),
"instructor_awards": person.award_set.filter(
badge__in=person.badges.instructor_badges()
),
"tasks": zip(tasks, self.foreign_tasks(tasks, person, roles)),
}
result.append(record)
return result
def make_message(self, record):
tmplt = get_template("mailing/instructor_activity.txt")
return tmplt.render(context=record)
def subject(self, record):
# in future we can vary the subject depending on the record details
return "Updating your Software Carpentry information"
def recipient(self, record):
return record["person"].email
def send_message(
self, subject, message, sender, recipient, for_real=False, django_mailing=False
):
if for_real:
if django_mailing:
send_mail(subject, message, sender, [recipient])
else:
command = 'mail -s "{subject}" -r {sender} {recipient}'.format(
subject=subject,
sender=sender,
recipient=recipient,
)
writer = os.popen(command, "w")
writer.write(message)
writer.close()
if self.verbosity >= 2:
# write only a header
self.stdout.write("-" * 40 + "\n")
self.stdout.write("To: {}\n".format(recipient))
self.stdout.write("Subject: {}\n".format(subject))
self.stdout.write("From: {}\n".format(sender))
if self.verbosity >= 3:
# write whole message out
self.stdout.write(message + "\n")
def handle(self, *args, **options):
# default is dummy run - only actually send mail if told to
send_for_real = options["send_out_for_real"]
# by default include only instructors who have `may_contact==True`
no_may_contact_only = options["no_may_contact_only"]
# use mailing options from settings.py or the `mail` system command?
django_mailing = options["django_mailing"]
# verbosity option is added by Django
self.verbosity = int(options["verbosity"])
sender = options["sender"]
results = self.fetch_activity(not no_may_contact_only)
for result in results:
message = self.make_message(result)
subject = self.subject(result)
recipient = self.recipient(result)
self.send_message(
subject,
message,
sender,
recipient,
for_real=send_for_real,
django_mailing=django_mailing,
)
if self.verbosity >= 1:
self.stdout.write("Sent {} emails.\n".format(len(results)))
|
guillermooo/dart-sublime-bundle-releases
|
refs/heads/master
|
sublime_plugin_lib/subprocess.py
|
1
|
# Copyright (c) 2014, Guillermo López-Anglada. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.)
from subprocess import Popen
import os
from . import PluginLogger
from .plat import supress_window
_logger = PluginLogger(__name__)
def killwin32(proc):
try:
path = os.path.expandvars("%WINDIR%\\System32\\taskkill.exe")
GenericBinary(show_window=False).start([path, "/pid", str(proc.pid)])
except Exception as e:
_logger.error(e)
class GenericBinary(object):
'''Starts a process.
'''
def __init__(self, *args, show_window=True):
'''
@show_window
Windows only. Whether to show a window.
'''
self.args = args
self.startupinfo = None
if not show_window:
self.startupinfo = supress_window()
def start(self, args=[], env=None, shell=False, cwd=None):
cmd = self.args + tuple(args)
_logger.debug('running cmd line (GenericBinary): %s', cmd)
Popen(cmd, startupinfo=self.startupinfo, env=env, shell=shell,
cwd=cwd)
|
Yukarumya/Yukarum-Redfoxes
|
refs/heads/master
|
media/webrtc/trunk/tools/gyp/test/mac/gyptest-xcuitest.py
|
12
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xcuitest targets are correctly configured.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['xcode'])
  # Skip this test if the installed Xcode is older than 7
import subprocess
job = subprocess.Popen(['xcodebuild', '-version'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = job.communicate()
if job.returncode != 0:
raise Exception('Error %d running xcodebuild' % job.returncode)
xcode_version, build_number = out.splitlines()
# Convert the version string from 'Xcode 5.0' to ['5','0'].
xcode_version = xcode_version.split()[-1].split('.')
if xcode_version < ['7']:
test.pass_test()
CHDIR = 'xcuitest'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', chdir=CHDIR, arguments=[
'-target', 'tests',
'-sdk', 'iphonesimulator',
])
test.pass_test()
|
romanchyla/pylucene-trunk
|
refs/heads/master
|
samples/LuceneInAction/DocumentUpdateTest.py
|
2
|
import os, sys, unittest, lucene
lucene.initVM()
sys.path.append(os.path.dirname(os.path.abspath(sys.argv[0])))
import lia.indexing.DocumentUpdateTest
unittest.main(lia.indexing.DocumentUpdateTest)
|
Vagab0nd/SiCKRAGE
|
refs/heads/master
|
lib3/timeago/locales/fr.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2017-8-30
@author: generated by @lolobosse script
'''
LOCALE = [
["à l'instant", "dans un instant"],
["il y a %s secondes", "dans %s secondes"],
["il y a 1 minute", "dans 1 minute"],
["il y a %s minutes", "dans %s minutes"],
["il y a 1 heure", "dans 1 heure"],
["il y a %s heures", "dans %s heures"],
["il y a 1 jour", "dans 1 jour"],
["il y a %s jours", "dans %s jours"],
["il y a 1 semaine", "dans 1 semaine"],
["il y a %s semaines", "dans %s semaines"],
["il y a 1 mois", "dans 1 mois"],
["il y a %s mois", "dans %s mois"],
["il y a 1 an", "dans 1 an"],
["il y a %s ans", "dans %s ans"]
]
|
Shaps/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/csvfile.py
|
80
|
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: csvfile
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
version_added: "1.5"
short_description: read data from a TSV or CSV file
description:
- The csvfile lookup reads the contents of a file in CSV (comma-separated value) format.
The lookup looks for the row where the first column matches keyname, and returns the value in the second column, unless a different column is specified.
options:
col:
description: column to return (0 index).
default: "1"
default:
description: what to return if the value is not found in the file.
default: ''
delimiter:
description: field separator in the file, for a tab you can specify "TAB" or "t".
default: TAB
file:
description: name of the CSV/TSV file to open.
default: ansible.csv
encoding:
description: Encoding (character set) of the used CSV file.
default: utf-8
version_added: "2.1"
notes:
- The default is for TSV files (tab delimited) not CSV (comma delimited) ... yes the name is misleading.
"""
EXAMPLES = """
- name: Match 'Li' on the first column, return the second column (0 based index)
debug: msg="The atomic number of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=,') }}"
- name: msg="Match 'Li' on the first column, but return the 3rd column (columns start counting after the match)"
debug: msg="The atomic mass of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=, col=2') }}"
- name: Define Values From CSV File
set_fact:
loop_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=1') }}"
int_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=2') }}"
int_mask: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=3') }}"
int_name: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=4') }}"
local_as: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=5') }}"
neighbor_as: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=6') }}"
neigh_int_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=7') }}"
delegate_to: localhost
"""
RETURN = """
_raw:
description:
- value(s) stored in file column
"""
import codecs
import csv
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.six import PY2
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.common._collections_compat import MutableSequence
class CSVRecoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding='utf-8'):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def __next__(self):
return next(self.reader).encode("utf-8")
next = __next__ # For Python 2
class CSVReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
if PY2:
f = CSVRecoder(f, encoding)
else:
f = codecs.getreader(encoding)(f)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def __next__(self):
row = next(self.reader)
return [to_text(s) for s in row]
next = __next__ # For Python 2
def __iter__(self):
return self
class LookupModule(LookupBase):
def read_csv(self, filename, key, delimiter, encoding='utf-8', dflt=None, col=1):
try:
f = open(filename, 'rb')
creader = CSVReader(f, delimiter=to_native(delimiter), encoding=encoding)
for row in creader:
if len(row) and row[0] == key:
return row[int(col)]
except Exception as e:
raise AnsibleError("csvfile: %s" % to_native(e))
return dflt
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
params = term.split()
key = params[0]
paramvals = {
'col': "1", # column to return
'default': None,
'delimiter': "TAB",
'file': 'ansible.csv',
'encoding': 'utf-8',
}
# parameters specified?
try:
for param in params[1:]:
name, value = param.split('=')
if name not in paramvals:
raise AnsibleAssertionError('%s not in paramvals' % name)
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
if paramvals['delimiter'] == 'TAB':
paramvals['delimiter'] = "\t"
lookupfile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['encoding'], paramvals['default'], paramvals['col'])
if var is not None:
if isinstance(var, MutableSequence):
for v in var:
ret.append(v)
else:
ret.append(var)
return ret
|
jacobfilik/scanning
|
refs/heads/master
|
org.eclipse.scanning.example.messaging/scripts/examples/05_get_names.py
|
2
|
from utilities.connections import create_connection, set_handlers, subscribe_all, send_all
conn_dict = {'device-response-topic': "/topic/org.eclipse.scanning.response.device.topic",
'device-request-topic': "/topic/org.eclipse.scanning.request.device.topic",
}
class DeviceNamesHandler(object):
def on_message(self, headers, data):
print(data['deviceType'] + " names are:")
for device in data['devices']:
            print(device['name'])
print("")
handlers = {'device-response-topic':DeviceNamesHandler()}
conn = create_connection()
set_handlers(conn, handlers)
subscribe_all(conn, conn_dict, handlers)
runnable_request = {"@type":"DeviceRequest","deviceType":"RUNNABLE","configure":"true"}
scannable_request = {"@type":"DeviceRequest","deviceType":"SCANNABLE","configure":"true"}
messages = [
('device-request-topic', runnable_request, 0),
('device-request-topic', scannable_request, 0),
]
send_all(conn, conn_dict, messages)
# Required to keep Python running indefinitely.
raw_input()
|
feroda/django
|
refs/heads/master
|
django/utils/log.py
|
3
|
from __future__ import unicode_literals
import logging
import sys
import warnings
from django.conf import settings
from django.core import mail
from django.core.mail import get_connection
from django.utils.deprecation import RemovedInNextVersionWarning
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
from django.views.debug import ExceptionReporter, get_exception_reporter_filter
# Default logging for Django. This sends an email to the site admins on every
# HTTP 500 error. Depending on DEBUG, all other log records are either sent to
# the console (DEBUG=True) or discarded by means of the NullHandler (DEBUG=False).
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console', 'mail_admins'],
},
'py.warnings': {
'handlers': ['console'],
},
}
}
def configure_logging(logging_config, logging_settings):
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
# RemovedInNextVersionWarning is a subclass of DeprecationWarning which
# is hidden by default, hence we force the "default" behavior
warnings.simplefilter("default", RemovedInNextVersionWarning)
if logging_config:
# First find the logging configuration function ...
logging_config_func = import_string(logging_config)
logging.config.dictConfig(DEFAULT_LOGGING)
# ... then invoke it with the logging settings
if logging_settings:
logging_config_func(logging_settings)
class AdminEmailHandler(logging.Handler):
"""An exception log handler that emails log entries to site admins.
If the request is passed as the first argument to the log record,
request data will be provided in the email report.
"""
def __init__(self, include_html=False, email_backend=None):
logging.Handler.__init__(self)
self.include_html = include_html
self.email_backend = email_backend
def emit(self, record):
try:
request = record.request
subject = '%s (%s IP): %s' % (
record.levelname,
('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
else 'EXTERNAL'),
record.getMessage()
)
filter = get_exception_reporter_filter(request)
request_repr = '\n{}'.format(force_text(filter.get_request_repr(request)))
except Exception:
subject = '%s: %s' % (
record.levelname,
record.getMessage()
)
request = None
request_repr = "unavailable"
subject = self.format_subject(subject)
if record.exc_info:
exc_info = record.exc_info
else:
exc_info = (None, record.getMessage(), None)
message = "%s\n\nRequest repr(): %s" % (self.format(record), request_repr)
reporter = ExceptionReporter(request, is_email=True, *exc_info)
html_message = reporter.get_traceback_html() if self.include_html else None
self.send_mail(subject, message, fail_silently=True, html_message=html_message)
def send_mail(self, subject, message, *args, **kwargs):
mail.mail_admins(subject, message, *args, connection=self.connection(), **kwargs)
def connection(self):
return get_connection(backend=self.email_backend, fail_silently=True)
def format_subject(self, subject):
"""
Escape CR and LF characters, and limit length.
RFC 2822's hard limit is 998 characters per line. So, minus "Subject: "
the actual subject must be no longer than 989 characters.
"""
formatted_subject = subject.replace('\n', '\\n').replace('\r', '\\r')
return formatted_subject[:989]
class CallbackFilter(logging.Filter):
"""
A logging filter that checks the return value of a given callable (which
takes the record-to-be-logged as its only parameter) to decide whether to
log a record.
"""
def __init__(self, callback):
self.callback = callback
def filter(self, record):
if self.callback(record):
return 1
return 0
class RequireDebugFalse(logging.Filter):
def filter(self, record):
return not settings.DEBUG
class RequireDebugTrue(logging.Filter):
def filter(self, record):
return settings.DEBUG
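# Illustrative sketch, not part of Django itself: a hypothetical settings.py
# LOGGING fragment wiring CallbackFilter into the mail_admins handler, using
# the same '()' instantiation style as DEFAULT_LOGGING above. The filter name
# and callback are made up for the example.
#
# def skip_suspicious(record):
#     return not record.getMessage().startswith("Invalid HTTP_HOST")
#
# LOGGING = {
#     'version': 1,
#     'filters': {
#         'skip_suspicious': {
#             '()': 'django.utils.log.CallbackFilter',
#             'callback': skip_suspicious,
#         },
#     },
#     'handlers': {
#         'mail_admins': {
#             'level': 'ERROR',
#             'filters': ['skip_suspicious'],
#             'class': 'django.utils.log.AdminEmailHandler',
#         },
#     },
# }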
|
shownomercy/django
|
refs/heads/master
|
django/conf/locale/mk/formats.py
|
504
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
openaddresses/openaddresses
|
refs/heads/master
|
scripts/es/download_gml.py
|
5
|
# coding: utf-8
import sys, os, urllib
with open(sys.argv[1], 'r') as f:
urls = f.readlines()
for url in urls:
url = url.rstrip("\n")
    if len(url) > 0:
filename = sys.argv[2] + url.split('/')[-1]
urllib.urlretrieve(url, filename)
sys.stdout.write('.')
sys.stdout.flush()
# fix filenames -- unsure why this is necessary but it is
for filename in os.listdir(sys.argv[2]):
    os.rename(os.path.join(sys.argv[2], filename),
              os.path.join(sys.argv[2], filename.strip()))
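# Illustrative usage, not part of the original script; the file and directory
# names are hypothetical. sys.argv[1] is a text file with one GML URL per
# line and sys.argv[2] is the destination directory, used above as a plain
# string prefix, so it should end with a path separator:
#   python download_gml.py urls.txt downloads/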
|
masschallenge/django-accelerator
|
refs/heads/development
|
accelerator/tests/factories/bucket_state_factory.py
|
1
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
from datetime import (
datetime,
timedelta,
)
import swapper
from factory import SubFactory
from factory.django import DjangoModelFactory
from pytz import utc
from accelerator.models import (
STALE_NOSTARTUP_BUCKET_TYPE,
STALE_LEADS_GROUP,
)
from accelerator.tests.factories.program_cycle_factory import (
ProgramCycleFactory
)
from accelerator.tests.factories.program_factory import ProgramFactory
from accelerator.tests.factories.program_role_factory import ProgramRoleFactory
BucketState = swapper.load_model('accelerator', 'BucketState')
class BucketStateFactory(DjangoModelFactory):
class Meta:
model = BucketState
basis = BucketState.CYCLE_BASED
name = STALE_NOSTARTUP_BUCKET_TYPE
group = STALE_LEADS_GROUP
sort_order = 1
cycle = SubFactory(ProgramCycleFactory)
program = SubFactory(ProgramFactory)
last_update = utc.localize(datetime.now() - timedelta(1))
program_role = SubFactory(ProgramRoleFactory)
|
morphis/home-assistant
|
refs/heads/snap-support
|
homeassistant/components/sensor/sabnzbd.py
|
23
|
"""
Support for monitoring an SABnzbd NZB client.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.sabnzbd/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_API_KEY, CONF_NAME, CONF_PORT, CONF_MONITORED_VARIABLES,
CONF_SSL)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['https://github.com/jamespcole/home-assistant-nzb-clients/'
'archive/616cad59154092599278661af17e2a9f2cf5e2a9.zip'
'#python-sabnzbd==0.1']
_LOGGER = logging.getLogger(__name__)
_THROTTLED_REFRESH = None
DEFAULT_NAME = 'SABnzbd'
DEFAULT_PORT = 8080
DEFAULT_SSL = False
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1)
SENSOR_TYPES = {
'current_status': ['Status', None],
'speed': ['Speed', 'MB/s'],
'queue_size': ['Queue', 'MB'],
'queue_remaining': ['Left', 'MB'],
'disk_size': ['Disk', 'GB'],
'disk_free': ['Disk Free', 'GB'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_MONITORED_VARIABLES, default=['current_status']):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
})
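# Illustrative configuration sketch, not part of this module: a hypothetical
# configuration.yaml entry matching PLATFORM_SCHEMA above; host and api_key
# values are placeholders.
#
# sensor:
#   - platform: sabnzbd
#     host: 192.168.1.10
#     port: 8080
#     api_key: YOUR_SABNZBD_API_KEY
#     name: SABnzbd
#     ssl: false
#     monitored_variables:
#       - current_status
#       - speed
#       - queue_remaining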
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the SABnzbd sensors."""
from pysabnzbd import SabnzbdApi, SabnzbdApiException
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
api_key = config.get(CONF_API_KEY)
monitored_types = config.get(CONF_MONITORED_VARIABLES)
use_ssl = config.get(CONF_SSL)
if use_ssl:
uri_scheme = 'https://'
else:
uri_scheme = 'http://'
base_url = "{}{}:{}/".format(uri_scheme, host, port)
sab_api = SabnzbdApi(base_url, api_key)
try:
sab_api.check_available()
except SabnzbdApiException:
_LOGGER.error("Connection to SABnzbd API failed")
return False
# pylint: disable=global-statement
global _THROTTLED_REFRESH
_THROTTLED_REFRESH = Throttle(
MIN_TIME_BETWEEN_UPDATES)(sab_api.refresh_queue)
devices = []
for variable in monitored_types:
devices.append(SabnzbdSensor(variable, sab_api, name))
add_devices(devices)
class SabnzbdSensor(Entity):
"""Representation of an SABnzbd sensor."""
def __init__(self, sensor_type, sabnzb_client, client_name):
"""Initialize the sensor."""
self._name = SENSOR_TYPES[sensor_type][0]
self.sabnzb_client = sabnzb_client
self.type = sensor_type
self.client_name = client_name
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
# pylint: disable=no-self-use
def refresh_sabnzbd_data(self):
"""Call the throttled SABnzbd refresh method."""
if _THROTTLED_REFRESH is not None:
from pysabnzbd import SabnzbdApiException
try:
_THROTTLED_REFRESH()
except SabnzbdApiException:
_LOGGER.exception("Connection to SABnzbd API failed")
def update(self):
"""Get the latest data and updates the states."""
self.refresh_sabnzbd_data()
if self.sabnzb_client.queue:
if self.type == 'current_status':
self._state = self.sabnzb_client.queue.get('status')
elif self.type == 'speed':
mb_spd = float(self.sabnzb_client.queue.get('kbpersec')) / 1024
self._state = round(mb_spd, 1)
elif self.type == 'queue_size':
self._state = self.sabnzb_client.queue.get('mb')
elif self.type == 'queue_remaining':
self._state = self.sabnzb_client.queue.get('mbleft')
elif self.type == 'disk_size':
self._state = self.sabnzb_client.queue.get('diskspacetotal1')
elif self.type == 'disk_free':
self._state = self.sabnzb_client.queue.get('diskspace1')
else:
self._state = 'Unknown'
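# Hedged sketch (illustration only, not part of the component): setup_platform
# wraps the API's refresh_queue once with Throttle and every sensor reuses that
# single wrapped callable, which is meant to keep the real SABnzbd query rate at
# roughly one call per MIN_TIME_BETWEEN_UPDATES window no matter how many
# sensors call update().
def _example_throttled_refresh(sab_api):
    """Return a throttled wrapper around sab_api.refresh_queue."""
    return Throttle(MIN_TIME_BETWEEN_UPDATES)(sab_api.refresh_queue)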
|
coderbone/SickRage-alt
|
refs/heads/master
|
lib/adba/aniDBmaper.py
|
70
|
#!/usr/bin/env python
#
# This file is part of aDBa.
#
# aDBa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# aDBa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with aDBa. If not, see <http://www.gnu.org/licenses/>.
from random import shuffle
class AniDBMaper:
blacklist = ('unused','retired','reserved')
def getAnimeBitsA(self,amask):
map = self.getAnimeMapA()
return self._getBitChain(map,amask)
def getAnimeCodesA(self,aBitChain):
amap = self.getAnimeMapA()
return self._getCodes(amap,aBitChain)
def getFileBitsF(self,fmask):
fmap = self.getFileMapF()
return self._getBitChain(fmap,fmask)
def getFileCodesF(self,bitChainF):
fmap = self.getFileMapF()
return self._getCodes(fmap,bitChainF)
def getFileBitsA(self,amask):
amap = self.getFileMapA()
return self._getBitChain(amap,amask)
def getFileCodesA(self,bitChainA):
amap = self.getFileMapA()
return self._getCodes(amap,bitChainA)
def _getBitChain(self,map,wanted):
"""Return an hex string with the correct bit set corresponding to the wanted fields in the map
"""
bit = 0
for index,field in enumerate(map):
if field in wanted and not field in self.blacklist:
bit = bit ^ (1<<len(map)-index-1)
bit = str(hex(bit)).lstrip("0x").rstrip("L")
bit = ''.join(["0" for unused in xrange(len(map)/4 - len(bit))])+bit
return bit
def _getCodes(self,map,bitChain):
"""Returns a list with the corresponding fields as set in the bitChain (hex string)
"""
codeList=[]
bitChain = int(bitChain,16)
mapLength = len(map)
for i in reversed(range(mapLength)):
if bitChain&(2**i):
codeList.append(map[mapLength-i-1])
return codeList
def getAnimeMapA(self):
# each line is one byte
# only change this if the api changes
map = ['aid','unused','year','type','related_aid_list','related_aid_type','category_list','category_weight_list',
'romaji_name','kanji_name','english_name','other_name','short_name_list','synonym_list','retired','retired',
'episodes','highest_episode_number','special_ep_count','air_date','end_date','url','picname','category_id_list',
'rating','vote_count','temp_rating','temp_vote_count','average_review_rating','review_count','award_list','is_18_restricted',
'anime_planet_id','ANN_id','allcinema_id','AnimeNfo_id','unused','unused','unused','date_record_updated',
'character_id_list','creator_id_list','main_creator_id_list','main_creator_name_list','unused','unused','unused','unused',
'specials_count','credits_count','other_count','trailer_count','parody_count','unused','unused','unused']
return map
def getFileMapF(self):
# each line is one byte
# only change this if the api changes
map = ['unused','aid','eid','gid','mylist_id','list_other_episodes','IsDeprecated','state',
'size','ed2k','md5','sha1','crc32','unused','unused','reserved',
'quality','source','audio_codec_list','audio_bitrate_list','video_codec','video_bitrate','video_resolution','file_type_extension',
'dub_language','sub_language','length_in_seconds','description','aired_date','unused','unused','anidb_file_name',
'mylist_state','mylist_filestate','mylist_viewed','mylist_viewdate','mylist_storage','mylist_source','mylist_other','unused']
return map
def getFileMapA(self):
# each line is one byte
# only change this if the api changes
map = ['anime_total_episodes','highest_episode_number','year','type','related_aid_list','related_aid_type','category_list','reserved',
'romaji_name','kanji_name','english_name','other_name','short_name_list','synonym_list','retired','retired',
'epno','ep_name','ep_romaji_name','ep_kanji_name','episode_rating','episode_vote_count','unused','unused',
'group_name','group_short_name','unused','unused','unused','unused','unused','date_aid_record_updated']
return map
def checkMapping(self,verbos=False):
print "------"
print "File F: "+ str(self.checkMapFileF(verbos))
print "------"
print "File A: "+ str(self.checkMapFileA(verbos))
def checkMapFileF(self,verbos=False):
getGeneralMap = lambda: self.getFileMapF()
getBits = lambda x: self.getFileBitsF(x)
getCodes = lambda x: self.getFileCodesF(x)
return self._checkMapGeneral(getGeneralMap,getBits,getCodes,verbos=verbos)
def checkMapFileA(self,verbos=False):
getGeneralMap = lambda: self.getFileMapA()
getBits = lambda x: self.getFileBitsA(x)
getCodes = lambda x: self.getFileCodesA(x)
return self._checkMapGeneral(getGeneralMap,getBits,getCodes,verbos=verbos)
def _checkMapGeneral(self,getGeneralMap,getBits,getCodes,verbos=False):
map = getGeneralMap()
shuffle(map)
mask = [elem for elem in map if elem not in self.blacklist][:5]
bits = getBits(mask)
mask_re = getCodes(bits)
bits_re = getBits(mask_re)
if verbos:
print mask
print mask_re
print bits
print bits_re
print "bits are:"+ str((bits_re == bits))
print "map is :"+ str((sorted(mask_re) == sorted(mask)))
return (bits_re == bits) and sorted(mask_re) == sorted(mask)
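# Hedged sketch (not part of the upstream module): a tiny round trip through the
# file-mask helpers defined above. getFileBitsF turns a list of wanted field
# names into a hex bit mask and getFileCodesF maps such a mask back to field
# names, so encoding and then decoding should recover the same fields.
def _example_mask_round_trip():
    maper = AniDBMaper()
    wanted = ['aid', 'size', 'ed2k']
    mask = maper.getFileBitsF(wanted)
    decoded = maper.getFileCodesF(mask)
    return sorted(decoded) == sorted(wanted)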
|
Syrcon/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/manifest/item.py
|
84
|
import os
import urlparse
from abc import ABCMeta, abstractmethod, abstractproperty
from utils import from_os_path, to_os_path
item_types = ["testharness", "reftest", "manual", "stub", "wdspec"]
def get_source_file(source_files, tests_root, manifest, path):
def make_new():
from sourcefile import SourceFile
return SourceFile(tests_root, path, manifest.url_base)
if source_files is None:
return make_new()
if path not in source_files:
source_files[path] = make_new()
return source_files[path]
class ManifestItem(object):
__metaclass__ = ABCMeta
item_type = None
def __init__(self, source_file, manifest=None):
self.manifest = manifest
self.source_file = source_file
@abstractproperty
def id(self):
"""The test's id (usually its url)"""
pass
@property
def path(self):
"""The test path relative to the test_root"""
return self.source_file.rel_path
@property
def https(self):
return "https" in self.source_file.meta_flags
def key(self):
"""A unique identifier for the test"""
return (self.item_type, self.id)
def meta_key(self):
"""Extra metadata that doesn't form part of the test identity, but for
which changes mean regenerating the manifest (e.g. the test timeout)."""
return ()
def __eq__(self, other):
if not hasattr(other, "key"):
return False
return self.key() == other.key()
def __hash__(self):
return hash(self.key() + self.meta_key())
def to_json(self):
return {"path": from_os_path(self.path)}
@classmethod
def from_json(self, manifest, tests_root, obj, source_files=None):
raise NotImplementedError
class URLManifestItem(ManifestItem):
def __init__(self, source_file, url, url_base="/", manifest=None):
ManifestItem.__init__(self, source_file, manifest=manifest)
self._url = url
self.url_base = url_base
@property
def id(self):
return self.url
@property
def url(self):
return urlparse.urljoin(self.url_base, self._url)
def to_json(self):
rv = ManifestItem.to_json(self)
rv["url"] = self._url
return rv
@classmethod
def from_json(cls, manifest, tests_root, obj, source_files=None):
source_file = get_source_file(source_files, tests_root, manifest,
to_os_path(obj["path"]))
return cls(source_file,
obj["url"],
url_base=manifest.url_base,
manifest=manifest)
class TestharnessTest(URLManifestItem):
item_type = "testharness"
def __init__(self, source_file, url, url_base="/", timeout=None, manifest=None):
URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
self.timeout = timeout
def meta_key(self):
return (self.timeout,)
def to_json(self):
rv = URLManifestItem.to_json(self)
if self.timeout is not None:
rv["timeout"] = self.timeout
return rv
@classmethod
def from_json(cls, manifest, tests_root, obj, source_files=None):
source_file = get_source_file(source_files, tests_root, manifest,
to_os_path(obj["path"]))
return cls(source_file,
obj["url"],
url_base=manifest.url_base,
timeout=obj.get("timeout"),
manifest=manifest)
class RefTest(URLManifestItem):
item_type = "reftest"
def __init__(self, source_file, url, references, url_base="/", timeout=None,
viewport_size=None, dpi=None, manifest=None):
URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
for _, ref_type in references:
if ref_type not in ["==", "!="]:
raise ValueError, "Unrecognised ref_type %s" % ref_type
self.references = tuple(references)
self.timeout = timeout
self.viewport_size = viewport_size
self.dpi = dpi
@property
def is_reference(self):
return self.source_file.name_is_reference
def meta_key(self):
return (self.timeout, self.viewport_size, self.dpi)
def to_json(self):
rv = URLManifestItem.to_json(self)
rv["references"] = self.references
if self.timeout is not None:
rv["timeout"] = self.timeout
if self.viewport_size is not None:
rv["viewport_size"] = self.viewport_size
if self.dpi is not None:
rv["dpi"] = self.dpi
return rv
@classmethod
def from_json(cls, manifest, tests_root, obj, source_files=None):
source_file = get_source_file(source_files, tests_root, manifest,
to_os_path(obj["path"]))
return cls(source_file,
obj["url"],
obj["references"],
url_base=manifest.url_base,
timeout=obj.get("timeout"),
viewport_size=obj.get("viewport_size"),
dpi=obj.get("dpi"),
manifest=manifest)
class ManualTest(URLManifestItem):
item_type = "manual"
class Stub(URLManifestItem):
item_type = "stub"
class WebdriverSpecTest(ManifestItem):
item_type = "wdspec"
@property
def id(self):
return self.path
@classmethod
def from_json(cls, manifest, tests_root, obj, source_files=None):
source_file = get_source_file(source_files, tests_root, manifest,
to_os_path(obj["path"]))
return cls(source_file, manifest=manifest)
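# Hedged sketch (illustration only, not part of the upstream module): what
# to_json produces for a TestharnessTest. The stub source file below is an
# assumption made only so the example is self-contained; real callers pass a
# SourceFile instance.
class _StubSourceFile(object):
    rel_path = "dom/historical.html"  # hypothetical test path

def _example_testharness_json():
    test = TestharnessTest(_StubSourceFile(), "/dom/historical.html", timeout="long")
    # roughly: {"path": "dom/historical.html", "url": "/dom/historical.html",
    #           "timeout": "long"}
    return test.to_json()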
|
fangxingli/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/admin_scripts/custom_templates/project_template/additional_dir/extra.py
|
701
|
# this file uses the {{ extra }} variable
|
nert-gu/Xposition
|
refs/heads/master
|
src/wiki/plugins/images/admin.py
|
2
|
from django import forms
from django.contrib import admin
from . import models
class ImageForm(forms.ModelForm):
class Meta:
model = models.Image
exclude = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.instance.pk:
revisions = models.ImageRevision.objects.filter(
plugin=self.instance)
self.fields['current_revision'].queryset = revisions
else:
self.fields[
'current_revision'].queryset = models.ImageRevision.objects.none()
self.fields['current_revision'].widget = forms.HiddenInput()
class ImageRevisionInline(admin.TabularInline):
model = models.ImageRevision
extra = 1
fields = ('image', 'locked', 'deleted')
class ImageAdmin(admin.ModelAdmin):
form = ImageForm
inlines = (ImageRevisionInline,)
admin.site.register(models.Image, ImageAdmin)
|
gavinfaux/namebench
|
refs/heads/master
|
nb_third_party/jinja2/environment.py
|
199
|
# -*- coding: utf-8 -*-
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import *
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode, _encode_filename
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
"""Return a new spontaneous environment. A spontaneous environment is an
unnamed and inaccessible (in theory) environment that is used for
templates generated from a string and not from the file system.
"""
try:
env = _spontaneous_environments.get(args)
except TypeError:
return Environment(*args)
if env is not None:
return env
_spontaneous_environments[args] = env = Environment(*args)
env.shared = True
return env
def create_cache(size):
"""Return the cache class for the given size."""
if size == 0:
return None
if size < 0:
return {}
return LRUCache(size)
def copy_cache(cache):
"""Create an empty copy of the given cache."""
if cache is None:
return None
elif type(cache) is dict:
return {}
return LRUCache(cache.capacity)
def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
Returns a dict of instanciated environments.
"""
result = {}
for extension in extensions:
if isinstance(extension, basestring):
extension = import_string(extension)
result[extension.identifier] = extension(environment)
return result
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
'be a subclass of undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here the possible initialization parameters:
`block_start_string`
The string marking the begin of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the begin of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the begin of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line based
comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
:class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return `True` or `False` depending on whether autoescape should be
enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``50`` which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (ie: file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
def __init__(self,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False,
loader=None,
cache_size=50,
auto_reload=True,
bytecode_cache=None):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
# - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
# existing already.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.newline_sequence = newline_sequence
# runtime information
self.undefined = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.bytecode_cache = None
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# load extensions
self.extensions = load_extensions(self, extensions)
_environment_sanity_check(self)
def extend(self, **attributes):
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in attributes.iteritems():
if not hasattr(self, key):
setattr(self, key, value)
def overlay(self, block_start_string=missing, block_end_string=missing,
variable_start_string=missing, variable_end_string=missing,
comment_start_string=missing, comment_end_string=missing,
line_statement_prefix=missing, line_comment_prefix=missing,
trim_blocks=missing, extensions=missing, optimized=missing,
undefined=missing, finalize=missing, autoescape=missing,
loader=missing, cache_size=missing, auto_reload=missing,
bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except of cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
"""
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in args.iteritems():
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in self.extensions.iteritems():
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv)
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, basestring):
try:
attr = str(argument)
except:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a bytestring.
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
@internalcode
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, _encode_filename(filename)).parse()
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = unicode(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(), unicode(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream)
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename)
return stream
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
the `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
`defer_init` is used internally to aid the module code generator. This
causes the generated code to be able to import without the global
environment variable to be set.
.. versionadded:: 2.4
`defer_init` parameter added.
"""
source_hint = None
try:
if isinstance(source, basestring):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = generate(source, self, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = _encode_filename(filename)
return compile(source, filename, 'exec')
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
Per default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Compiles all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be will be stored in a directory.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files.
.. versionadded:: 2.4
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
import imp, struct, marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0755 << 16L
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError, e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = compile(code, _encode_filename(filename), 'exec')
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = filter(filter_func, x)
return x
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
raise exc_type, exc_value, tb
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
method ask the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals))
@internalcode
def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, basestring):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None)
def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d)
class Template(object):
"""The central template object. This class represents a compiled template
and is used to evaluate it.
Normally the template object is generated from an :class:`Environment` but
it also has a constructor that makes it possible to create a template
instance directly using the constructor. It takes the same arguments as
the environment constructor but it's not possible to specify a loader.
Every template object has a few methods and members that are guaranteed
to exist. However it's important that a template object should be
considered immutable. Modifications on the object are not supported.
Template objects created from the constructor rather than an environment
do have an `environment` attribute that points to a temporary environment
that is probably shared with other templates created with the constructor
and compatible settings.
>>> template = Template('Hello {{ name }}!')
>>> template.render(name='John Doe')
u'Hello John Doe!'
>>> stream = template.stream(name='John Doe')
>>> stream.next()
u'Hello John Doe!'
>>> stream.next()
Traceback (most recent call last):
...
StopIteration
"""
def __new__(cls, source,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False):
env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
line_statement_prefix, line_comment_prefix, trim_blocks,
newline_sequence, frozenset(extensions), optimized, undefined,
finalize, autoescape, None, 0, False, None)
return env.from_string(source, template_class=cls)
@classmethod
def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {
'environment': environment,
'__file__': code.co_filename
}
exec code in namespace
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv
@classmethod
def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals)
@classmethod
def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace['name']
t.filename = namespace['__file__']
t.blocks = namespace['blocks']
# render function and module
t.root_render_func = namespace['root']
t._module = None
# debug and loader helpers
t._debug_info = namespace['debug_info']
t._uptodate = None
# store the reference
namespace['environment'] = environment
namespace['__jinja_template__'] = t
return t
def render(self, *args, **kwargs):
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
This will return the rendered template as unicode string.
"""
vars = dict(*args, **kwargs)
try:
return concat(self.root_render_func(self.new_context(vars)))
except:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
def generate(self, *args, **kwargs):
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
"""
vars = dict(*args, **kwargs)
try:
for event in self.root_render_func(self.new_context(vars)):
yield event
except:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
is passed as is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals)
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared, locals))
@property
def module(self):
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> unicode(t.module)
u'23'
>>> t.module.foo()
u'42'
"""
if self._module is not None:
return self._module
self._module = rv = self.make_module()
return rv
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1
@property
def is_up_to_date(self):
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
def debug_info(self):
"""The debug info mapping."""
return [tuple(map(int, x.split('='))) for x in
self._debug_info.split('&')]
def __repr__(self):
if self.name is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.name)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateModule(object):
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally
converting it into a unicode or byte string renders the contents.
"""
def __init__(self, template, context):
self._body_stream = list(template.root_render_func(context))
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self):
return Markup(concat(self._body_stream))
def __str__(self):
return unicode(self).encode('utf-8')
# unicode goes after __str__ because we configured 2to3 to rename
# __unicode__ to __str__. because the 2to3 tree is not designed to
# remove nodes from it, we leave the above __str__ around and let
# it override at runtime.
def __unicode__(self):
return concat(self._body_stream)
def __repr__(self):
if self.__name__ is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.__name__)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates the expression-like access
to the template with an expression it wraps.
"""
def __init__(self, template, undefined_to_none):
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars['result']
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
class TemplateStream(object):
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
instruction in the template one unicode string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
into a new unicode string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
def __init__(self, gen):
self._gen = gen
self.disable_buffering()
def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
Per default unicode strings are written, if you want to encode
before writing specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, basestring):
fp = file(fp, 'w')
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
if hasattr(fp, 'writelines'):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self):
"""Disable the output buffering."""
self._next = self._gen.next
self.buffered = False
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError('buffer size too small')
def generator(next):
buf = []
c_size = 0
push = buf.append
while 1:
try:
while c_size < size:
c = next()
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
self.buffered = True
self._next = generator(self._gen.next).next
def __iter__(self):
return self
def next(self):
return self._next()
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
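# Hedged usage sketch (not part of upstream Jinja2): the two most common entry
# points defined in this module -- rendering a template built from a string, and
# compiling a standalone expression. Both rely only on APIs defined above.
def _example_environment_usage():
    env = Environment()
    template = env.from_string(u'Hello {{ name }}!')
    greeting = template.render(name=u'World')       # -> u'Hello World!'
    is_big = env.compile_expression('value > 10')   # callable taking kwargs
    return greeting, is_big(value=42)               # -> (u'Hello World!', True)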
|
2gis/lode_runner
|
refs/heads/master
|
tests/test_multiple_decorators.py
|
1
|
import unittest
from nose.plugins.attrib import attr
from lode_runner.plugins.dataprovider import dataprovider
tests_to_run = set([
"test_multiple_decorators_slow_1",
"test_multiple_decorators_slow_2",
"test_multiple_decorators_fast_1",
"test_multiple_decorators_fast_2",
])
class MultipleDecoratorsTest(unittest.TestCase):
@dataprovider([1, 2])
@attr(speed='slow')
def test_multiple_decorators_slow(self, number):
tests_to_run.remove("%s_%s" % ("test_multiple_decorators_slow", number))
@attr(speed='fast')
@dataprovider([1, 2])
def test_multiple_decorators_fast(self, number):
tests_to_run.remove("%s_%s" % ("test_multiple_decorators_fast", number))
|
benob/chainer
|
refs/heads/master
|
chainer/optimizers/momentum_sgd.py
|
5
|
from chainer import cuda
from chainer import optimizer
class MomentumSGD(optimizer.GradientMethod):
"""Classical momentum SGD."""
def __init__(self, lr=0.01, momentum=0.9):
self.lr = lr
self.momentum = momentum
def init_state(self, param, state):
xp = cuda.get_array_module(param.data)
with cuda.get_device(param.data):
state['v'] = xp.zeros_like(param.data)
def update_one_cpu(self, param, state):
v = state['v']
v *= self.momentum
v -= self.lr * param.grad
param.data += v
def update_one_gpu(self, param, state):
cuda.elementwise(
'T grad, T lr, T momentum',
'T param, T v',
'''v = momentum * v - lr * grad;
param += v;''',
'momentum_sgd')(param.grad, self.lr, self.momentum,
param.data, state['v'])
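# Hedged sketch (illustration only, not part of Chainer): the classical momentum
# rule implemented by update_one_cpu and the GPU kernel above, written out for a
# single parameter so the arithmetic is easy to follow. It works elementwise for
# numpy arrays as well as for plain floats.
def _example_momentum_step(param, grad, v, lr=0.01, momentum=0.9):
    v = momentum * v - lr * grad   # v <- momentum * v - lr * grad
    return param + v, v            # param <- param + v

# e.g. param=1.0, grad=0.5, v=0.0 gives (0.995, -0.005) after one step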
|
sheltowt/PIXLEE-computer-vision-clustering
|
refs/heads/master
|
PCV-master/examples/lktrack.py
|
18
|
from numpy import *
import cv2
# some constants and default parameters
lk_params = dict(winSize=(15,15),maxLevel=2,
criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,10,0.03))
subpix_params = dict(zeroZone=(-1,-1),winSize=(10,10),
criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS,20,0.03))
feature_params = dict(maxCorners=500,qualityLevel=0.01,minDistance=10)
class LKTracker(object):
""" Class for Lucas-Kanade tracking with
pyramidal optical flow."""
def __init__(self,imnames):
""" Initialize with a list of image names. """
self.imnames = imnames
self.features = []
self.tracks = []
self.current_frame = 0
def step(self,framenbr=None):
""" Step to another frame. If no argument is
given, step to the next frame. """
if framenbr is None:
self.current_frame = (self.current_frame + 1) % len(self.imnames)
else:
self.current_frame = framenbr % len(self.imnames)
def detect_points(self):
""" Detect 'good features to track' (corners) in the current frame
using sub-pixel accuracy. """
# load the image and create grayscale
self.image = cv2.imread(self.imnames[self.current_frame])
self.gray = cv2.cvtColor(self.image,cv2.COLOR_BGR2GRAY)
# search for good points
features = cv2.goodFeaturesToTrack(self.gray, **feature_params)
# refine the corner locations
cv2.cornerSubPix(self.gray,features, **subpix_params)
self.features = features
self.tracks = [[p] for p in features.reshape((-1,2))]
self.prev_gray = self.gray
def track_points(self):
""" Track the detected features. """
if self.features != []:
self.step() # move to the next frame
# load the image and create grayscale
self.image = cv2.imread(self.imnames[self.current_frame])
self.gray = cv2.cvtColor(self.image,cv2.COLOR_BGR2GRAY)
# reshape to fit input format
tmp = float32(self.features).reshape(-1, 1, 2)
# calculate optical flow
features,status,track_error = cv2.calcOpticalFlowPyrLK(self.prev_gray,self.gray,tmp,None,**lk_params)
# remove points lost
self.features = [p for (st,p) in zip(status,features) if st]
# clean tracks from lost points
features = array(features).reshape((-1,2))
for i,f in enumerate(features):
self.tracks[i].append(f)
ndx = [i for (i,st) in enumerate(status) if not st]
ndx.reverse() #remove from back
for i in ndx:
self.tracks.pop(i)
self.prev_gray = self.gray
def track(self):
""" Generator for stepping through a sequence."""
for i in range(len(self.imnames)):
if self.features == []:
self.detect_points()
else:
self.track_points()
# create a copy in RGB
f = array(self.features).reshape(-1,2)
im = cv2.cvtColor(self.image,cv2.COLOR_BGR2RGB)
yield im,f
def draw(self):
""" Draw the current image with points using
OpenCV's own drawing functions.
Press any key to close the window."""
# draw points as green circles
for point in self.features:
cv2.circle(self.image,(int(point[0][0]),int(point[0][1])),3,(0,255,0),-1)
cv2.imshow('LKtrack',self.image)
cv2.waitKey()
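# Hedged usage sketch (not part of the original module): stepping the tracker
# over an image sequence. The file list is a placeholder; track() detects points
# on the first frame and then yields each RGB frame with its tracked points.
def _example_track(imnames):
    tracker = LKTracker(imnames)
    for im, points in tracker.track():
        # im is the current frame in RGB, points is an (N, 2) array of corners
        pass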
|
dischinator/pyload
|
refs/heads/main
|
module/plugins/crypter/DuckCryptInfo.py
|
5
|
# -*- coding: utf-8 -*-
import re
import BeautifulSoup
from module.plugins.internal.Crypter import Crypter
class DuckCryptInfo(Crypter):
__name__ = "DuckCryptInfo"
__type__ = "crypter"
__version__ = "0.07"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?duckcrypt\.info/(folder|wait|link)/(\w+)/?(\w*)'
__config__ = [("activated" , "bool" , "Activated" , True ),
("use_premium" , "bool" , "Use premium account if available", True ),
("folder_per_package", "Default;Yes;No", "Create folder for each package" , "Default")]
__description__ = """DuckCrypt.info decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("godofdream", "soilfiction@gmail.com")]
TIMER_PATTERN = r'<span id="timer">(.*)</span>'
def decrypt(self, pyfile):
url = pyfile.url
m = re.match(self.__pattern__, url)
if m is None:
self.fail(_("Weird error in link"))
if str(m.group(1)) == "link":
self.handle_link(url)
else:
self.handle_folder(m)
def handle_folder(self, m):
html = self.load("http://duckcrypt.info/ajax/auth.php?hash=" + str(m.group(2)))
m = re.match(self.__pattern__, html)
self.log_debug("Redirect to " + m.group(0))
html = self.load(str(m.group(0)))
soup = BeautifulSoup.BeautifulSoup(html)
cryptlinks = soup.findAll("div", attrs={'class': "folderbox"})
self.log_debug("Redirect to " + cryptlinks)
if not cryptlinks:
self.error(_("No link found"))
for clink in cryptlinks:
if clink.find("a"):
self.handle_link(clink.find("a")['href'])
def handle_link(self, url):
html = self.load(url)
soup = BeautifulSoup.BeautifulSoup(html)
self.links = [soup.find("iframe")['src']]
if not self.links:
self.log_info(_("No link found"))
|
FreeScienceCommunity/or-tools
|
refs/heads/master
|
examples/python/crossword2.py
|
32
|
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Crosswords in Google CP Solver.
This is a standard example for constraint logic programming. See e.g.
http://www.cis.temple.edu/~ingargio/cis587/readings/constraints.html
'''
We are to complete the puzzle
1 2 3 4 5
+---+---+---+---+---+ Given the list of words:
1 | 1 | | 2 | | 3 | AFT LASER
+---+---+---+---+---+ ALE LEE
2 | # | # | | # | | EEL LINE
+---+---+---+---+---+ HEEL SAILS
3 | # | 4 | | 5 | | HIKE SHEET
+---+---+---+---+---+ HOSES STEER
4 | 6 | # | 7 | | | KEEL TIE
+---+---+---+---+---+ KNOT
5 | 8 | | | | |
+---+---+---+---+---+
6 | | # | # | | # | The numbers 1,2,3,4,5,6,7,8 in the crossword
+---+---+---+---+---+ puzzle correspond to the words
that will start at those locations.
'''
The model was inspired by Sebastian Brand's Array Constraint cross word
example
http://www.cs.mu.oz.au/~sbrand/project/ac/
http://www.cs.mu.oz.au/~sbrand/project/ac/examples.pl
Also, see the following models:
* MiniZinc: http://www.hakank.org/minizinc/crossword.mzn
* Comet: http://www.hakank.org/comet/crossword.co
* ECLiPSe: http://hakank.org/eclipse/crossword2.ecl
* Gecode: http://hakank.org/gecode/crossword2.cpp
* SICStus: http://hakank.org/sicstus/crossword2.pl
* Zinc: http://hakank.org/minizinc/crossword2.zinc
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from google.apputils import app
from ortools.constraint_solver import pywrapcp
def main(_):
# Create the solver.
solver = pywrapcp.Solver("Problem")
#
# data
#
alpha = "_abcdefghijklmnopqrstuvwxyz"
a = 1
b = 2
c = 3
d = 4
e = 5
f = 6
g = 7
h = 8
i = 9
j = 10
k = 11
l = 12
m = 13
n = 14
o = 15
p = 16
q = 17
r = 18
s = 19
t = 20
u = 21
v = 22
w = 23
x = 24
y = 25
z = 26
num_words = 15
word_len = 5
AA = [
[h, o, s, e, s], # HOSES
[l, a, s, e, r], # LASER
[s, a, i, l, s], # SAILS
[s, h, e, e, t], # SHEET
[s, t, e, e, r], # STEER
[h, e, e, l, 0], # HEEL
[h, i, k, e, 0], # HIKE
[k, e, e, l, 0], # KEEL
[k, n, o, t, 0], # KNOT
[l, i, n, e, 0], # LINE
[a, f, t, 0, 0], # AFT
[a, l, e, 0, 0], # ALE
[e, e, l, 0, 0], # EEL
[l, e, e, 0, 0], # LEE
[t, i, e, 0, 0] # TIE
]
num_overlapping = 12
overlapping = [
[0, 2, 1, 0], # s
[0, 4, 2, 0], # s
[3, 1, 1, 2], # i
[3, 2, 4, 0], # k
[3, 3, 2, 2], # e
[6, 0, 1, 3], # l
[6, 1, 4, 1], # e
[6, 2, 2, 3], # e
[7, 0, 5, 1], # l
[7, 2, 1, 4], # s
[7, 3, 4, 2], # e
[7, 4, 2, 4] # r
]
n = 8
# declare variables
A = {}
for I in range(num_words):
for J in range(word_len):
A[(I, J)] = solver.IntVar(0, 26, "A(%i,%i)" % (I, J))
A_flat = [A[(I, J)] for I in range(num_words) for J in range(word_len)]
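  # E[i] is the index (into AA) of the word assigned to crossword slot i.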
E = [solver.IntVar(0, num_words, "E%i" % I) for I in range(n)]
#
# constraints
#
solver.Add(solver.AllDifferent(E))
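  # Fix the letter matrix: row I of A spells word I from AA.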
for I in range(num_words):
for J in range(word_len):
solver.Add(A[(I, J)] == AA[I][J])
for I in range(num_overlapping):
# This is what I would do:
# solver.Add(A[(E[overlapping[I][0]], overlapping[I][1])] == A[(E[overlapping[I][2]], overlapping[I][3])])
# But we must use Element explicitly
solver.Add(
solver.Element(
A_flat, E[overlapping[I][0]] * word_len + overlapping[I][1]) ==
solver.Element(
A_flat, E[overlapping[I][2]] * word_len + overlapping[I][3]))
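  # e.g. overlapping[0] = [0, 2, 1, 0]: the 3rd letter of the word in slot 0
  # must equal the 1st letter of the word in slot 1.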
#
# solution and search
#
solution = solver.Assignment()
solution.Add(E)
# db: DecisionBuilder
db = solver.Phase(E + A_flat,
solver.INT_VAR_SIMPLE,
solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
print E
print_solution(A, E, alpha, n, word_len)
num_solutions += 1
solver.EndSearch()
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
def print_solution(A, E, alpha, n, word_len):
for ee in range(n):
print "%i: (%2i)" % (ee, E[ee].Value()),
print "".join(["%s" % (alpha[A[ee, ii].Value()]) for ii in range(word_len)])
if __name__ == "__main__":
app.run()
|
BT-fgarbely/odoo
|
refs/heads/8.0
|
openerp/report/render/odt2odt/odt2odt.py
|
443
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report.render.rml2pdf import utils
import copy
class odt2odt(object):
def __init__(self, odt, localcontext):
self.localcontext = localcontext
self.etree = odt
self._node = None
def render(self):
        def process_text(node, new_node):
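            # Recursively copy node's children into new_node, running every
            # text and tail value through _process_text for substitution.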
for child in utils._child_get(node, self):
new_child = copy.deepcopy(child)
new_node.append(new_child)
if len(child):
for n in new_child:
new_child.text = utils._process_text(self, child.text)
new_child.tail = utils._process_text(self, child.tail)
new_child.remove(n)
process_text(child, new_child)
else:
new_child.text = utils._process_text(self, child.text)
new_child.tail = utils._process_text(self, child.tail)
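        # Render into a deep copy of the root: strip the copied children,
        # then rebuild them with substituted text via process_text().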
self._node = copy.deepcopy(self.etree)
for n in self._node:
self._node.remove(n)
process_text(self.etree, self._node)
return self._node
def parseNode(node, localcontext=None):
    r = odt2odt(node, localcontext or {})
return r.render()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
KWierso/treeherder
|
refs/heads/master
|
treeherder/perf/migrations/0012_rename_summary_last_updated.py
|
4
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-20 15:02
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('perf', '0011_inc_extra_options_length'),
]
operations = [
migrations.RenameField(
model_name='performancealertsummary',
old_name='last_updated',
new_name='created',
),
]
|