| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, may be null) |
|---|---|---|---|---|
| trivoldus28/pulsarch-verilog | refs/heads/master | tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/cgitb.py | 3 | content: |
"""More comprehensive traceback formatting for Python scripts.
To enable this module, do:
import cgitb; cgitb.enable()
at the top of your script. The optional arguments to enable() are:
display - if true, tracebacks are displayed in the web browser
logdir - if set, tracebacks are written to files in this directory
context - number of lines of source code to show for each stack frame
format - 'text' or 'html' controls the output format
By default, tracebacks are displayed but not saved, the context is 5 lines
and the output format is 'html' (for backwards compatibility with the
original use of this module)
Alternatively, if you have caught an exception and want cgitb to display it
for you, call cgitb.handler(). The optional argument to handler() is a
3-item tuple (etype, evalue, etb) just like the value of sys.exc_info().
The default handler displays output as HTML.
"""
__author__ = 'Ka-Ping Yee'
__version__ = '$Revision: 1.9 $'
import sys
def reset():
"""Return a string that resets the CGI and browser to a known state."""
return '''<!--: spam
Content-Type: text/html
<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> -->
<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> -->
</font> </font> </font> </script> </object> </blockquote> </pre>
</table> </table> </table> </table> </table> </font> </font> </font>'''
__UNDEF__ = [] # a special sentinel object
def small(text): return '<small>' + text + '</small>'
def strong(text): return '<strong>' + text + '</strong>'
def grey(text): return '<font color="#909090">' + text + '</font>'
def lookup(name, frame, locals):
"""Find the value for a given name in the given environment."""
if name in locals:
return 'local', locals[name]
if name in frame.f_globals:
return 'global', frame.f_globals[name]
if '__builtins__' in frame.f_globals:
builtins = frame.f_globals['__builtins__']
if type(builtins) is type({}):
if name in builtins:
return 'builtin', builtins[name]
else:
if hasattr(builtins, name):
return 'builtin', getattr(builtins, name)
return None, __UNDEF__
def scanvars(reader, frame, locals):
"""Scan one logical line of Python and look up values of variables used."""
import tokenize, keyword
vars, lasttoken, parent, prefix = [], None, None, ''
for ttype, token, start, end, line in tokenize.generate_tokens(reader):
if ttype == tokenize.NEWLINE: break
if ttype == tokenize.NAME and token not in keyword.kwlist:
if lasttoken == '.':
if parent is not __UNDEF__:
value = getattr(parent, token, __UNDEF__)
vars.append((prefix + token, prefix, value))
else:
where, value = lookup(token, frame, locals)
vars.append((token, where, value))
elif token == '.':
prefix += lasttoken + '.'
parent = value
else:
parent, prefix = None, ''
lasttoken = token
return vars
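# Illustrative example (editor's note, not part of the original module): for a
# traceback source line such as
#     total = obj.count + limit
# scanvars() yields tuples roughly like
#     [('total', 'local', ...), ('obj', 'local', <obj>),
#      ('obj.count', 'obj.', <count value>), ('limit', 'global', <limit value>)]
# where the middle element is 'local', 'global', 'builtin', a dotted prefix for
# attribute accesses, or None when lookup() cannot resolve the name.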
def html((etype, evalue, etb), context=5):
"""Return a nice HTML document describing a given traceback."""
import os, types, time, traceback, linecache, inspect, pydoc
if type(etype) is types.ClassType:
etype = etype.__name__
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
'<big><big><strong>%s</strong></big></big>' % str(etype),
'#ffffff', '#6622aa', pyver + '<br>' + date) + '''
<p>A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.'''
indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>'
frames = []
records = inspect.getinnerframes(etb, context)
for frame, file, lnum, func, lines, index in records:
file = file and os.path.abspath(file) or '?'
link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
call = 'in ' + strong(func) + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.html.repr(value))
highlight = {}
def reader(lnum=[lnum]):
highlight[lnum[0]] = 1
try: return linecache.getline(file, lnum[0])
finally: lnum[0] += 1
vars = scanvars(reader, frame, locals)
rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
('<big>&nbsp;</big>', link, call)]
if index is not None:
i = lnum - index
for line in lines:
num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;'
line = '<tt>%s%s</tt>' % (num, pydoc.html.preformat(line))
if i in highlight:
rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
else:
rows.append('<tr><td>%s</td></tr>' % grey(line))
i += 1
done, dump = {}, []
for name, where, value in vars:
if name in done: continue
done[name] = 1
if value is not __UNDEF__:
if where in ['global', 'builtin']:
name = ('<em>%s</em> ' % where) + strong(name)
elif where == 'local':
name = strong(name)
else:
name = where + strong(name.split('.')[-1])
dump.append('%s = %s' % (name, pydoc.html.repr(value)))
else:
dump.append(name + ' <em>undefined</em>')
rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
frames.append('''<p>
<table width="100%%" cellspacing=0 cellpadding=0 border=0>
%s</table>''' % '\n'.join(rows))
exception = ['<p>%s: %s' % (strong(str(etype)), str(evalue))]
if type(evalue) is types.InstanceType:
for name in dir(evalue):
if name[:1] == '_': continue
value = pydoc.html.repr(getattr(evalue, name))
exception.append('\n<br>%s%s =\n%s' % (indent, name, value))
import traceback
return head + ''.join(frames) + ''.join(exception) + '''
<!-- The above is a description of an error in a Python program, formatted
for a Web browser because the 'cgitb' module was enabled. In case you
are not reading this in a Web browser, here is the original traceback:
%s
-->
''' % ''.join(traceback.format_exception(etype, evalue, etb))
def text((etype, evalue, etb), context=5):
"""Return a plain text document describing a given traceback."""
import os, types, time, traceback, linecache, inspect, pydoc
if type(etype) is types.ClassType:
etype = etype.__name__
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + '''
A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.
'''
frames = []
records = inspect.getinnerframes(etb, context)
for frame, file, lnum, func, lines, index in records:
file = file and os.path.abspath(file) or '?'
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
call = 'in ' + func + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.text.repr(value))
highlight = {}
def reader(lnum=[lnum]):
highlight[lnum[0]] = 1
try: return linecache.getline(file, lnum[0])
finally: lnum[0] += 1
vars = scanvars(reader, frame, locals)
rows = [' %s %s' % (file, call)]
if index is not None:
i = lnum - index
for line in lines:
num = '%5d ' % i
rows.append(num+line.rstrip())
i += 1
done, dump = {}, []
for name, where, value in vars:
if name in done: continue
done[name] = 1
if value is not __UNDEF__:
if where == 'global': name = 'global ' + name
elif where == 'local': name = name
else: name = where + name.split('.')[-1]
dump.append('%s = %s' % (name, pydoc.text.repr(value)))
else:
dump.append(name + ' undefined')
rows.append('\n'.join(dump))
frames.append('\n%s\n' % '\n'.join(rows))
exception = ['%s: %s' % (str(etype), str(evalue))]
if type(evalue) is types.InstanceType:
for name in dir(evalue):
value = pydoc.text.repr(getattr(evalue, name))
exception.append('\n%s%s = %s' % (" "*4, name, value))
import traceback
return head + ''.join(frames) + ''.join(exception) + '''
The above is a description of an error in a Python program. Here is
the original traceback:
%s
''' % ''.join(traceback.format_exception(etype, evalue, etb))
class Hook:
"""A hook to replace sys.excepthook that shows tracebacks in HTML."""
def __init__(self, display=1, logdir=None, context=5, file=None,
format="html"):
self.display = display # send tracebacks to browser if true
self.logdir = logdir # log tracebacks to files if not None
self.context = context # number of source code lines per frame
self.file = file or sys.stdout # place to send the output
self.format = format
def __call__(self, etype, evalue, etb):
self.handle((etype, evalue, etb))
def handle(self, info=None):
info = info or sys.exc_info()
if self.format == "html":
self.file.write(reset())
formatter = (self.format=="html") and html or text
plain = False
try:
doc = formatter(info, self.context)
except: # just in case something goes wrong
import traceback
doc = ''.join(traceback.format_exception(*info))
plain = True
if self.display:
if plain:
doc = doc.replace('&', '&amp;').replace('<', '&lt;')
self.file.write('<pre>' + doc + '</pre>\n')
else:
self.file.write(doc + '\n')
else:
self.file.write('<p>A problem occurred in a Python script.\n')
if self.logdir is not None:
import os, tempfile
suffix = ['.html', '.txt'][self.format=="html"]
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
try:
file = os.fdopen(fd, 'w')
file.write(doc)
file.close()
msg = '<p> %s contains the description of this error.' % path
except:
msg = '<p> Tried to save traceback to %s, but failed.' % path
self.file.write(msg + '\n')
try:
self.file.flush()
except: pass
handler = Hook().handle
def enable(display=1, logdir=None, context=5, format="html"):
"""Install an exception handler that formats tracebacks as HTML.
The optional argument 'display' can be set to 0 to suppress sending the
traceback to the browser, and 'logdir' can be set to a directory to cause
tracebacks to be written to files there."""
sys.excepthook = Hook(display=display, logdir=logdir,
context=context, format=format)
| ltilve/ChromiumGStreamerBackend | refs/heads/master | build/android/pylib/perf/test_options.py | 28 | content: |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines the PerfOptions named tuple."""
import collections
PerfOptions = collections.namedtuple('PerfOptions', [
'steps',
'flaky_steps',
'output_json_list',
'print_step',
'no_timeout',
'test_filter',
'dry_run',
'single_step',
'collect_chartjson_data',
'output_chartjson_data',
'max_battery_temp',
'min_battery_level',
])
| suiyuan2009/tensorflow | refs/heads/master | tensorflow/python/training/ftrl_test.py | 68 | content: |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Ftrl operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
class FtrlOptimizerTest(test.TestCase):
def doTestFtrlwithoutRegularization(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session() as sess:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.60260963, -4.29698515]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28432083, -0.56694895]), v1_val)
def testFtrlWithoutRegularization(self):
self.doTestFtrlwithoutRegularization(use_resource=False)
def testResourceFtrlWithoutRegularization(self):
self.doTestFtrlwithoutRegularization(use_resource=True)
def testFtrlwithoutRegularization2(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.55607247, -3.98729396]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28232238, -0.56096673]), v1_val)
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = ftrl.FtrlOptimizer(1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0, 1]], var0.eval(), atol=0.01)
def testFtrlWithL1(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]), v1_val)
def testFtrlWithL1_L2(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.24059935, -0.46829352]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.02406147, -0.04830509]), v1_val)
def testFtrlWithL1_L2_L2Shrinkage(self):
"""Test the new FTRL op with support for l2 shrinkage.
The addition of this parameter, which places constant pressure on weights
towards the origin, causes the gradient-descent trajectory to differ. The
weights will tend to have smaller magnitudes with this parameter set.
"""
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = sess.run([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.22078767, -0.41378114]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.02919818, -0.07343706]), v1_val)
def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False):
if is_sparse:
var0 = variables.Variable([[0.0], [0.0]], dtype=dtype)
var1 = variables.Variable([[0.0], [0.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.02], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
sess = ops.get_default_session()
v0_val, v1_val = sess.run([var0, var1])
if is_sparse:
self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val)
self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val)
else:
self.assertAllCloseAccordingToType([0.0, 0.0], v0_val)
self.assertAllCloseAccordingToType([0.0, 0.0], v1_val)
# Run Ftrl for a few steps
for _ in range(steps):
update.run()
v0_val, v1_val = sess.run([var0, var1])
return v0_val, v1_val
# When variables are initialized with zeros, FTRL-Proximal has two properties:
# 1. Without L1 and L2 but with a fixed learning rate, FTRL-Proximal is
#    identical to GradientDescent.
# 2. Without L1 and L2 but with an adaptive learning rate, FTRL-Proximal is
#    identical to Adagrad.
# Based on these two properties, we test whether our implementation of
# FTRL-Proximal performs the same updates as Adagrad or GradientDescent.
def testEquivAdagradwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.test_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(
3.0, initial_accumulator_value=0.1), dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivSparseAdagradwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(
3.0, initial_accumulator_value=0.1),
dtype,
is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivSparseGradientDescentwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.test_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0),
dtype,
is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
def testEquivGradientDescentwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.test_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.test_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0), dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
if __name__ == "__main__":
test.main()
| Soya93/Extract-Refactoring | refs/heads/master | python/lib/Lib/modjy/modjy_response.py | 109 | content: |
###
#
# Copyright Alan Kennedy.
#
# You may contact the copyright holder at this uri:
#
# http://www.xhaus.com/contact/modjy
#
# The licence under which this code is released is the Apache License v2.0.
#
# The terms and conditions of this license are listed in a file contained
# in the distribution that also contained this file, under the name
# LICENSE.txt.
#
# You may also read a copy of the license at the following web address.
#
# http://modjy.xhaus.com/LICENSE.txt
#
###
import types
from java.lang import System
from modjy_exceptions import *
from modjy_write import write_object
# From: http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.5.1
hop_by_hop_headers = {
'connection': None,
'keep-alive': None,
'proxy-authenticate': None,
'proxy-authorization': None,
'te': None,
'trailers': None,
'transfer-encoding': None,
'upgrade': None,
}
class start_response_object:
def __init__(self, req, resp):
self.http_req = req
self.http_resp = resp
self.write_callable = None
self.called = 0
self.content_length = None
# I'm doing the parameters this way to facilitate porting back to java
def __call__(self, *args, **keywords):
if len(args) < 2 or len(args) > 3:
raise BadArgument("Start response callback requires either two or three arguments: got %s" % str(args))
if len(args) == 3:
exc_info = args[2]
try:
try:
self.http_resp.reset()
except IllegalStateException, isx:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
else:
if self.called > 0:
raise StartResponseCalledTwice("Start response callback may only be called once, without exception information.")
status_str = args[0]
headers_list = args[1]
if not isinstance(status_str, types.StringType):
raise BadArgument("Start response callback requires string as first argument")
if not isinstance(headers_list, types.ListType):
raise BadArgument("Start response callback requires list as second argument")
try:
status_code, status_message_str = status_str.split(" ", 1)
self.http_resp.setStatus(int(status_code))
except ValueError:
raise BadArgument("Status string must be of the form '<int> <string>'")
self.make_write_object()
try:
for header_name, header_value in headers_list:
header_name_lower = header_name.lower()
if hop_by_hop_headers.has_key(header_name_lower):
raise HopByHopHeaderSet("Under WSGI, it is illegal to set hop-by-hop headers, i.e. '%s'" % header_name)
if header_name_lower == "content-length":
try:
self.set_content_length(int(header_value))
except ValueError, v:
raise BadArgument("Content-Length header value must be a string containing an integer, not '%s'" % header_value)
else:
final_value = header_value.encode('latin-1')
# Here would be the place to check for control characters, whitespace, etc
self.http_resp.addHeader(header_name, final_value)
except (AttributeError, TypeError), t:
raise BadArgument("Start response callback headers must contain a list of (<string>,<string>) tuples")
except UnicodeError, u:
raise BadArgument("Encoding error: header values may only contain latin-1 characters, not '%s'" % repr(header_value))
except ValueError, v:
raise BadArgument("Headers list must contain 2-tuples")
self.called += 1
return self.write_callable
def set_content_length(self, length):
if self.write_callable.num_writes == 0:
self.content_length = length
self.http_resp.setContentLength(length)
else:
raise ResponseCommitted("Cannot set content-length: response is already committed.")
def make_write_object(self):
try:
self.write_callable = write_object(self.http_resp.getOutputStream())
except IOException, iox:
raise IOError(iox)
return self.write_callable
| petrutlucian94/nova | refs/heads/master | nova/tests/unit/scheduler/filters/test_core_filters.py | 38 | content: |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import core_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestCoreFilter(test.NoDBTestCase):
def test_core_filter_passes(self):
self.filt_cls = core_filter.CoreFilter()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 7})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails_safe(self):
self.filt_cls = core_filter.CoreFilter()
filter_properties = {'instance_type': {'vcpus': 1}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails(self):
self.filt_cls = core_filter.CoreFilter()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
def test_aggregate_core_filter_value_error(self, agg_mock):
self.filt_cls = core_filter.AggregateCoreFilter()
filter_properties = {'context': mock.sentinel.ctx,
'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 7})
agg_mock.return_value = set(['XXX'])
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
agg_mock.assert_called_once_with(host, 'cpu_allocation_ratio')
self.assertEqual(4 * 2, host.limits['vcpu'])
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
def test_aggregate_core_filter_default_value(self, agg_mock):
self.filt_cls = core_filter.AggregateCoreFilter()
filter_properties = {'context': mock.sentinel.ctx,
'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8})
agg_mock.return_value = set([])
# False: fallback to default flag w/o aggregates
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
agg_mock.assert_called_once_with(host, 'cpu_allocation_ratio')
# True: use ratio from aggregates
agg_mock.return_value = set(['3'])
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
self.assertEqual(4 * 3, host.limits['vcpu'])
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
def test_aggregate_core_filter_conflict_values(self, agg_mock):
self.filt_cls = core_filter.AggregateCoreFilter()
filter_properties = {'context': mock.sentinel.ctx,
'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=1)
host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8})
agg_mock.return_value = set(['2', '3'])
# use the minimum ratio from aggregates
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
self.assertEqual(4 * 2, host.limits['vcpu'])
| duyet-website/api.duyet.net | refs/heads/master | lib/boto/mturk/price.py | 170 | content: |
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Price(object):
def __init__(self, amount=0.0, currency_code='USD'):
self.amount = amount
self.currency_code = currency_code
self.formatted_price = ''
def __repr__(self):
if self.formatted_price:
return self.formatted_price
else:
return str(self.amount)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Amount':
self.amount = float(value)
elif name == 'CurrencyCode':
self.currency_code = value
elif name == 'FormattedPrice':
self.formatted_price = value
def get_as_params(self, label, ord=1):
return {'%s.%d.Amount'%(label, ord) : str(self.amount),
'%s.%d.CurrencyCode'%(label, ord) : self.currency_code}
| eayunstack/rally | refs/heads/product | tests/unit/plugins/openstack/context/quotas/test_manila_quotas.py | 17 | content: |
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.context.quotas import manila_quotas
from tests.unit import test
CLIENTS_CLASS = (
"rally.plugins.openstack.context.quotas.quotas.osclients.Clients")
class ManilaQuotasTestCase(test.TestCase):
@mock.patch(CLIENTS_CLASS)
def test_update(self, mock_clients):
instance = manila_quotas.ManilaQuotas(mock_clients)
tenant_id = mock.MagicMock()
quotas_values = {
"shares": 10,
"gigabytes": 13,
"snapshots": 7,
"snapshot_gigabytes": 51,
"share_networks": 1014,
}
instance.update(tenant_id, **quotas_values)
mock_clients.manila.return_value.quotas.update.assert_called_once_with(
tenant_id, **quotas_values)
@mock.patch(CLIENTS_CLASS)
def test_delete(self, mock_clients):
instance = manila_quotas.ManilaQuotas(mock_clients)
tenant_id = mock.MagicMock()
instance.delete(tenant_id)
mock_clients.manila.return_value.quotas.delete.assert_called_once_with(
tenant_id)
| kaerdsar/addons-yelizariev | refs/heads/8.0 | project_default_visibility_followers/__init__.py | 2355 | content: |
# -*- coding: utf-8 -*-
import models
| chaluemwut/fbserver | refs/heads/master | venv/lib/python2.7/site-packages/scipy/signal/setup.py | 10 | content: |
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('signal', parent_package, top_path)
config.add_data_dir('tests')
config.add_data_dir('benchmarks')
config.add_extension('sigtools',
sources=['sigtoolsmodule.c', 'firfilter.c',
'medianfilter.c', 'lfilter.c.src',
'correlate_nd.c.src'],
depends=['sigtools.h'],
include_dirs=['.']
)
config.add_extension('_spectral', sources=['_spectral.c'])
config.add_extension('_max_len_seq', sources=['_max_len_seq.c'])
spline_src = ['splinemodule.c', 'S_bspline_util.c', 'D_bspline_util.c',
'C_bspline_util.c', 'Z_bspline_util.c', 'bspline_util.c']
config.add_extension('spline', sources=spline_src)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| mer-tools/git-repo | refs/heads/stable | subcmds/branches.py | 5 | content: |
# -*- coding:utf-8 -*-
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from color import Coloring
from command import Command
class BranchColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'branch')
self.current = self.printer('current', fg='green')
self.local = self.printer('local')
self.notinproject = self.printer('notinproject', fg='red')
class BranchInfo(object):
def __init__(self, name):
self.name = name
self.current = 0
self.published = 0
self.published_equal = 0
self.projects = []
def add(self, b):
if b.current:
self.current += 1
if b.published:
self.published += 1
if b.revision == b.published:
self.published_equal += 1
self.projects.append(b)
@property
def IsCurrent(self):
return self.current > 0
@property
def IsSplitCurrent(self):
return self.current != 0 and self.current != len(self.projects)
@property
def IsPublished(self):
return self.published > 0
@property
def IsPublishedEqual(self):
return self.published_equal == len(self.projects)
class Branches(Command):
common = True
helpSummary = "View current topic branches"
helpUsage = """
%prog [<project>...]
Summarizes the currently available topic branches.
# Branch Display
The branch display output by this command is organized into four
columns of information; for example:
 *P nocolor                   | in repo
                                 repo2
The first column contains a * if the branch is the currently
checked out branch in any of the specified projects, or a blank
if no project has the branch checked out.
The second column contains either blank, p or P, depending upon
the upload status of the branch.
(blank): branch not yet published by repo upload
P: all commits were published by repo upload
p: only some commits were published by repo upload
The third column contains the branch name.
The fourth column (after the | separator) lists the projects that
the branch appears in, or does not appear in. If no project list
is shown, then the branch appears in all projects.
"""
def Execute(self, opt, args):
projects = self.GetProjects(args)
out = BranchColoring(self.manifest.manifestProject.config)
all_branches = {}
project_cnt = len(projects)
for project in projects:
for name, b in project.GetBranches().items():
b.project = project
if name not in all_branches:
all_branches[name] = BranchInfo(name)
all_branches[name].add(b)
names = list(sorted(all_branches))
if not names:
print(' (no branches)', file=sys.stderr)
return
width = 25
for name in names:
if width < len(name):
width = len(name)
for name in names:
i = all_branches[name]
in_cnt = len(i.projects)
if i.IsCurrent:
current = '*'
hdr = out.current
else:
current = ' '
hdr = out.local
if i.IsPublishedEqual:
published = 'P'
elif i.IsPublished:
published = 'p'
else:
published = ' '
hdr('%c%c %-*s' % (current, published, width, name))
out.write(' |')
if in_cnt < project_cnt:
fmt = out.write
paths = []
non_cur_paths = []
if i.IsSplitCurrent or (in_cnt < project_cnt - in_cnt):
in_type = 'in'
for b in i.projects:
if not i.IsSplitCurrent or b.current:
paths.append(b.project.relpath)
else:
non_cur_paths.append(b.project.relpath)
else:
fmt = out.notinproject
in_type = 'not in'
have = set()
for b in i.projects:
have.add(b.project)
for p in projects:
if p not in have:
paths.append(p.relpath)
s = ' %s %s' % (in_type, ', '.join(paths))
if not i.IsSplitCurrent and (width + 7 + len(s) < 80):
fmt = out.current if i.IsCurrent else fmt
fmt(s)
else:
fmt(' %s:' % in_type)
fmt = out.current if i.IsCurrent else out.write
for p in paths:
out.nl()
fmt(width * ' ' + ' %s' % p)
fmt = out.write
for p in non_cur_paths:
out.nl()
fmt(width * ' ' + ' %s' % p)
else:
out.write(' in all projects')
out.nl()
| forseti-security/forseti-security | refs/heads/master | google/cloud/forseti/common/util/replay.py | 1 | content: |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper functions used to record and replay API responses."""
from builtins import str
import collections
import functools
import os
import pickle
from googleapiclient import errors
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
RECORD_ENVIRONMENT_VAR = 'FORSETI_RECORD_FILE'
REPLAY_ENVIRONMENT_VAR = 'FORSETI_REPLAY_FILE'
def _key_from_request(request):
"""Generate a unique key from a request.
Args:
request (HttpRequest): a googleapiclient HttpRequest object.
Returns:
str: A unique key from the request uri and body.
"""
return '{}{}'.format(request.uri, request.body)
def record(requests):
"""Record and serialize GCP API call answers.
Args:
requests (dict): A dictionary to store a copy of all requests and
responses in, before pickling.
Returns:
function: Decorator function.
"""
def decorate(f):
"""Decorator function for the wrapper.
Args:
f(function): passes a function into the wrapper.
Returns:
function: Wrapped function.
"""
@functools.wraps(f)
def record_wrapper(self, request, *args, **kwargs):
"""Record and serialize GCP API call answers.
Args:
self (object): Self of the caller.
request (HttpRequest): The HttpRequest object to execute.
**args (list): Additional args to pass through to function.
**kwargs (dict): Additional key word args to pass through to
function.
Returns:
object: The result from the wrapped function.
Raises:
HttpError: Raised by any fatal HTTP error when executing the
HttpRequest.
Exception: Any exception raised by the wrapped function.
"""
record_file = os.environ.get(RECORD_ENVIRONMENT_VAR, None)
if not record_file:
return f(self, request, *args, **kwargs)
with open(record_file, 'wb') as outfile:
pickler = pickle.Pickler(outfile)
request_key = _key_from_request(request)
results = requests.setdefault(
request_key, collections.deque())
try:
result = f(self, request, *args, **kwargs)
obj = {
'exception_args': None,
'raised': False,
'request': request.to_json(),
'result': result,
'uri': request.uri}
results.append(obj)
return result
except errors.HttpError as e:
# HttpError won't unpickle without all three arguments.
obj = {
'raised': True,
'request': request.to_json(),
'result': e.__class__,
'uri': request.uri,
'exception_args': (e.resp, e.content, e.uri)
}
results.append(obj)
raise
except Exception as e:
LOGGER.exception(e)
obj = {
'raised': True,
'request': request.to_json(),
'result': e.__class__,
'uri': request.uri,
'exception_args': [str(e)]
}
results.append(obj)
raise
finally:
LOGGER.debug('Recording key %s', request_key)
pickler.dump(requests)
outfile.flush()
return record_wrapper
return decorate
def replay(requests):
"""Record and serialize GCP API call answers.
Args:
requests (dict): A dictionary to store a copy of all requests and
responses in, after unpickling.
Returns:
function: Decorator function.
"""
def decorate(f):
"""Replay GCP API call answers.
Args:
f (function): Function to decorate
Returns:
function: Wrapped function.
"""
@functools.wraps(f)
def replay_wrapper(self, request, *args, **kwargs):
"""Replay and deserialize GCP API call answers.
Args:
self (object): Self of the caller.
request (HttpRequest): The HttpRequest object to execute.
**args (list): Additional args to pass through to function.
**kwargs (dict): Additional key word args to pass through to
function.
Returns:
object: The result object from the previous recording.
Raises:
Exception: Any exception raised during the previous recording.
"""
replay_file = os.environ.get(REPLAY_ENVIRONMENT_VAR, None)
if not replay_file:
return f(self, request, *args, **kwargs)
if not requests:
LOGGER.info('Loading replay file %s.', replay_file)
with open(replay_file, 'rb') as infile:
unpickler = pickle.Unpickler(infile)
requests.update(unpickler.load())
request_key = _key_from_request(request)
if request_key in requests:
results = requests[request_key]
# Pull the first result from the queue.
obj = results.popleft()
if obj['raised']:
raise obj['result'](*obj['exception_args'])
return obj['result']
else:
LOGGER.warning(
'Request URI %s with body %s not found in recorded '
'requests, executing live http request instead.',
request.uri, request.body)
return f(self, request, *args, **kwargs)
return replay_wrapper
return decorate
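# Illustrative usage sketch (editor's note, not part of the original module):
# the decorators above are meant to wrap a method whose second argument is a
# googleapiclient HttpRequest; the class and method names below are
# hypothetical.
#
#     _requests = {}
#
#     class _ApiExecutor(object):
#         @record(_requests)
#         def execute_query(self, request, *args, **kwargs):
#             return request.execute()
#
# With FORSETI_RECORD_FILE set, each response is pickled to that file as the
# wrapped method runs; decorating the same method with @replay(_requests) and
# setting FORSETI_REPLAY_FILE serves the recorded responses back instead of
# making live HTTP calls.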
| sergiocorato/server-tools | refs/heads/8.0 | log_forwarded_for_ip/models/__init__.py | 17 | content: |
# -*- coding: utf-8 -*-
# © 2015 Aserti Global Solutions
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import log_forwarded_for_ip_installed
| KeyWeeUsr/plyer | refs/heads/master | plyer/platforms/android/audio.py | 6 | content: |
from jnius import autoclass
from plyer.facades.audio import Audio
# Recorder Classes
MediaRecorder = autoclass('android.media.MediaRecorder')
AudioSource = autoclass('android.media.MediaRecorder$AudioSource')
OutputFormat = autoclass('android.media.MediaRecorder$OutputFormat')
AudioEncoder = autoclass('android.media.MediaRecorder$AudioEncoder')
# Player Classes
MediaPlayer = autoclass('android.media.MediaPlayer')
class AndroidAudio(Audio):
'''Audio for Android.
For recording audio we use the Android MediaRecorder class.
For playing audio we use the Android MediaPlayer class.
'''
def __init__(self, file_path=None):
default_path = '/sdcard/testrecorder.3gp'
super(AndroidAudio, self).__init__(file_path or default_path)
self._recorder = None
self._player = None
def _start(self):
self._recorder = MediaRecorder()
self._recorder.setAudioSource(AudioSource.DEFAULT)
self._recorder.setOutputFormat(OutputFormat.DEFAULT)
self._recorder.setAudioEncoder(AudioEncoder.DEFAULT)
self._recorder.setOutputFile(self.file_path)
self._recorder.prepare()
self._recorder.start()
def _stop(self):
if self._recorder:
self._recorder.stop()
self._recorder.release()
self._recorder = None
if self._player:
self._player.stop()
self._player.release()
self._player = None
def _play(self):
self._player = MediaPlayer()
self._player.setDataSource(self.file_path)
self._player.prepare()
self._player.start()
def instance():
return AndroidAudio()
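# Illustrative usage sketch (editor's note, not part of the original module):
# the public start()/stop()/play() methods are assumed to come from the plyer
# Audio facade, which delegates to the _start/_stop/_play hooks above.
#
#     audio = AndroidAudio(file_path='/sdcard/testrecorder.3gp')
#     audio.start()   # begin recording to file_path
#     audio.stop()    # stop recording (also stops playback)
#     audio.play()    # play back the recorded file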
| lmregus/Portfolio | refs/heads/master | python/design_patterns/env/lib/python3.7/site-packages/IPython/core/tests/test_inputsplitter.py | 2 | content: |
# -*- coding: utf-8 -*-
"""Tests for the inputsplitter module."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import unittest
import sys
import nose.tools as nt
from IPython.core import inputsplitter as isp
from IPython.core.inputtransformer import InputTransformer
from IPython.core.tests.test_inputtransformer import syntax, syntax_ml
from IPython.testing import tools as tt
from IPython.utils import py3compat
from IPython.utils.py3compat import input
#-----------------------------------------------------------------------------
# Semi-complete examples (also used as tests)
#-----------------------------------------------------------------------------
# Note: at the bottom, there's a slightly more complete version of this that
# can be useful during development of code here.
def mini_interactive_loop(input_func):
"""Minimal example of the logic of an interactive interpreter loop.
This serves as an example, and it is used by the test system with a fake
raw_input that simulates interactive input."""
from IPython.core.inputsplitter import InputSplitter
isp = InputSplitter()
# In practice, this input loop would be wrapped in an outside loop to read
# input indefinitely, until some exit/quit command was issued. Here we
# only illustrate the basic inner loop.
while isp.push_accepts_more():
indent = ' '*isp.get_indent_spaces()
prompt = '>>> ' + indent
line = indent + input_func(prompt)
isp.push(line)
# Here we just return input so we can use it in a test suite, but a real
# interpreter would instead send it for execution somewhere.
src = isp.source_reset()
#print 'Input source was:\n', src # dbg
return src
#-----------------------------------------------------------------------------
# Test utilities, just for local use
#-----------------------------------------------------------------------------
def assemble(block):
"""Assemble a block into multi-line sub-blocks."""
return ['\n'.join(sub_block)+'\n' for sub_block in block]
def pseudo_input(lines):
"""Return a function that acts like raw_input but feeds the input list."""
ilines = iter(lines)
def raw_in(prompt):
try:
return next(ilines)
except StopIteration:
return ''
return raw_in
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_spaces():
tests = [('', 0),
(' ', 1),
('\n', 0),
(' \n', 1),
('x', 0),
(' x', 1),
('  x',2),
('    x',4),
# Note: tabs are counted as a single whitespace!
('\tx', 1),
('\t x', 2),
]
tt.check_pairs(isp.num_ini_spaces, tests)
def test_remove_comments():
tests = [('text', 'text'),
('text # comment', 'text '),
('text # comment\n', 'text \n'),
('text # comment \n', 'text \n'),
('line # c \nline\n','line \nline\n'),
('line # c \nline#c2 \nline\nline #c\n\n',
'line \nline\nline\nline \n\n'),
]
tt.check_pairs(isp.remove_comments, tests)
def test_get_input_encoding():
encoding = isp.get_input_encoding()
nt.assert_true(isinstance(encoding, str))
# simple-minded check that at least encoding a simple string works with the
# encoding we got.
nt.assert_equal(u'test'.encode(encoding), b'test')
class NoInputEncodingTestCase(unittest.TestCase):
def setUp(self):
self.old_stdin = sys.stdin
class X: pass
fake_stdin = X()
sys.stdin = fake_stdin
def test(self):
# Verify that if sys.stdin has no 'encoding' attribute we do the right
# thing
enc = isp.get_input_encoding()
self.assertEqual(enc, 'ascii')
def tearDown(self):
sys.stdin = self.old_stdin
class InputSplitterTestCase(unittest.TestCase):
def setUp(self):
self.isp = isp.InputSplitter()
def test_reset(self):
isp = self.isp
isp.push('x=1')
isp.reset()
self.assertEqual(isp._buffer, [])
self.assertEqual(isp.get_indent_spaces(), 0)
self.assertEqual(isp.source, '')
self.assertEqual(isp.code, None)
self.assertEqual(isp._is_complete, False)
def test_source(self):
self.isp._store('1')
self.isp._store('2')
self.assertEqual(self.isp.source, '1\n2\n')
self.assertEqual(len(self.isp._buffer)>0, True)
self.assertEqual(self.isp.source_reset(), '1\n2\n')
self.assertEqual(self.isp._buffer, [])
self.assertEqual(self.isp.source, '')
def test_indent(self):
isp = self.isp # shorthand
isp.push('x=1')
self.assertEqual(isp.get_indent_spaces(), 0)
isp.push('if 1:\n x=1')
self.assertEqual(isp.get_indent_spaces(), 4)
isp.push('y=2\n')
self.assertEqual(isp.get_indent_spaces(), 0)
def test_indent2(self):
isp = self.isp
isp.push('if 1:')
self.assertEqual(isp.get_indent_spaces(), 4)
isp.push(' x=1')
self.assertEqual(isp.get_indent_spaces(), 4)
# Blank lines shouldn't change the indent level
isp.push(' '*2)
self.assertEqual(isp.get_indent_spaces(), 4)
def test_indent3(self):
isp = self.isp
# When a multiline statement contains parens or multiline strings, we
# shouldn't get confused.
isp.push("if 1:")
isp.push(" x = (1+\n 2)")
self.assertEqual(isp.get_indent_spaces(), 4)
def test_indent4(self):
isp = self.isp
# whitespace after ':' should not screw up indent level
isp.push('if 1: \n x=1')
self.assertEqual(isp.get_indent_spaces(), 4)
isp.push('y=2\n')
self.assertEqual(isp.get_indent_spaces(), 0)
isp.push('if 1:\t\n x=1')
self.assertEqual(isp.get_indent_spaces(), 4)
isp.push('y=2\n')
self.assertEqual(isp.get_indent_spaces(), 0)
def test_dedent_pass(self):
isp = self.isp # shorthand
# should NOT cause dedent
isp.push('if 1:\n passes = 5')
self.assertEqual(isp.get_indent_spaces(), 4)
isp.push('if 1:\n pass')
self.assertEqual(isp.get_indent_spaces(), 0)
isp.push('if 1:\n pass ')
self.assertEqual(isp.get_indent_spaces(), 0)
def test_dedent_break(self):
isp = self.isp # shorthand
# should NOT cause dedent
isp.push('while 1:\n breaks = 5')
self.assertEqual(isp.get_indent_spaces(), 4)
isp.push('while 1:\n break')
self.assertEqual(isp.get_indent_spaces(), 0)
isp.push('while 1:\n break ')
self.assertEqual(isp.get_indent_spaces(), 0)
def test_dedent_continue(self):
isp = self.isp # shorthand
# should NOT cause dedent
isp.push('while 1:\n continues = 5')
self.assertEqual(isp.get_indent_spaces(), 4)
isp.push('while 1:\n continue')
self.assertEqual(isp.get_indent_spaces(), 0)
isp.push('while 1:\n continue ')
self.assertEqual(isp.get_indent_spaces(), 0)
def test_dedent_raise(self):
isp = self.isp # shorthand
# should NOT cause dedent
isp.push('if 1:\n raised = 4')
self.assertEqual(isp.get_indent_spaces(), 4)
isp.push('if 1:\n raise TypeError()')
self.assertEqual(isp.get_indent_spaces(), 0)
isp.push('if 1:\n raise')
self.assertEqual(isp.get_indent_spaces(), 0)
isp.push('if 1:\n raise ')
self.assertEqual(isp.get_indent_spaces(), 0)
def test_dedent_return(self):
isp = self.isp # shorthand
# should NOT cause dedent
isp.push('if 1:\n returning = 4')
self.assertEqual(isp.get_indent_spaces(), 4)
isp.push('if 1:\n return 5 + 493')
self.assertEqual(isp.get_indent_spaces(), 0)
isp.push('if 1:\n return')
self.assertEqual(isp.get_indent_spaces(), 0)
isp.push('if 1:\n return ')
self.assertEqual(isp.get_indent_spaces(), 0)
isp.push('if 1:\n return(0)')
self.assertEqual(isp.get_indent_spaces(), 0)
def test_push(self):
isp = self.isp
self.assertEqual(isp.push('x=1'), True)
def test_push2(self):
isp = self.isp
self.assertEqual(isp.push('if 1:'), False)
for line in [' x=1', '# a comment', ' y=2']:
print(line)
self.assertEqual(isp.push(line), True)
def test_push3(self):
isp = self.isp
isp.push('if True:')
isp.push(' a = 1')
self.assertEqual(isp.push('b = [1,'), False)
def test_push_accepts_more(self):
isp = self.isp
isp.push('x=1')
self.assertEqual(isp.push_accepts_more(), False)
def test_push_accepts_more2(self):
isp = self.isp
isp.push('if 1:')
self.assertEqual(isp.push_accepts_more(), True)
isp.push(' x=1')
self.assertEqual(isp.push_accepts_more(), True)
isp.push('')
self.assertEqual(isp.push_accepts_more(), False)
def test_push_accepts_more3(self):
isp = self.isp
isp.push("x = (2+\n3)")
self.assertEqual(isp.push_accepts_more(), False)
def test_push_accepts_more4(self):
isp = self.isp
# When a multiline statement contains parens or multiline strings, we
# shouldn't get confused.
# FIXME: we should be able to better handle de-dents in statements like
# multiline strings and multiline expressions (continued with \ or
# parens). Right now we aren't handling the indentation tracking quite
# correctly with this, though in practice it may not be too much of a
# problem. We'll need to see.
isp.push("if 1:")
isp.push(" x = (2+")
isp.push(" 3)")
self.assertEqual(isp.push_accepts_more(), True)
isp.push(" y = 3")
self.assertEqual(isp.push_accepts_more(), True)
isp.push('')
self.assertEqual(isp.push_accepts_more(), False)
def test_push_accepts_more5(self):
isp = self.isp
isp.push('try:')
isp.push(' a = 5')
isp.push('except:')
isp.push(' raise')
# We want to be able to add an else: block at this point, so it should
# wait for a blank line.
self.assertEqual(isp.push_accepts_more(), True)
def test_continuation(self):
isp = self.isp
isp.push("import os, \\")
self.assertEqual(isp.push_accepts_more(), True)
isp.push("sys")
self.assertEqual(isp.push_accepts_more(), False)
def test_syntax_error(self):
isp = self.isp
# Syntax errors immediately produce a 'ready' block, so the invalid
# Python can be sent to the kernel for evaluation with possible ipython
# special-syntax conversion.
isp.push('run foo')
self.assertEqual(isp.push_accepts_more(), False)
def test_unicode(self):
self.isp.push(u"Pérez")
self.isp.push(u'\xc3\xa9')
self.isp.push(u"u'\xc3\xa9'")
def test_line_continuation(self):
""" Test issue #2108."""
isp = self.isp
# A blank line after a line continuation should not accept more
isp.push("1 \\\n\n")
self.assertEqual(isp.push_accepts_more(), False)
# Whitespace after a \ is a SyntaxError. The only way to test that
# here is to test that push doesn't accept more (as with
# test_syntax_error() above).
isp.push(r"1 \ ")
self.assertEqual(isp.push_accepts_more(), False)
# Even if the line is continuable (c.f. the regular Python
# interpreter)
isp.push(r"(1 \ ")
self.assertEqual(isp.push_accepts_more(), False)
def test_check_complete(self):
isp = self.isp
self.assertEqual(isp.check_complete("a = 1"), ('complete', None))
self.assertEqual(isp.check_complete("for a in range(5):"), ('incomplete', 4))
self.assertEqual(isp.check_complete("raise = 2"), ('invalid', None))
self.assertEqual(isp.check_complete("a = [1,\n2,"), ('incomplete', 0))
self.assertEqual(isp.check_complete("def a():\n x=1\n global x"), ('invalid', None))
class InteractiveLoopTestCase(unittest.TestCase):
"""Tests for an interactive loop like a python shell.
"""
def check_ns(self, lines, ns):
"""Validate that the given input lines produce the resulting namespace.
Note: the input lines are given exactly as they would be typed in an
auto-indenting environment, as mini_interactive_loop above already does
auto-indenting and prepends spaces to the input.
"""
src = mini_interactive_loop(pseudo_input(lines))
test_ns = {}
exec(src, test_ns)
# We can't check that the provided ns is identical to the test_ns,
# because Python fills test_ns with extra keys (copyright, etc). But
# we can check that the given dict is *contained* in test_ns
for k,v in ns.items():
self.assertEqual(test_ns[k], v)
def test_simple(self):
self.check_ns(['x=1'], dict(x=1))
def test_simple2(self):
self.check_ns(['if 1:', 'x=2'], dict(x=2))
def test_xy(self):
self.check_ns(['x=1; y=2'], dict(x=1, y=2))
def test_abc(self):
self.check_ns(['if 1:','a=1','b=2','c=3'], dict(a=1, b=2, c=3))
def test_multi(self):
self.check_ns(['x =(1+','1+','2)'], dict(x=4))
class IPythonInputTestCase(InputSplitterTestCase):
"""By just creating a new class whose .isp is a different instance, we
re-run the same test battery on the new input splitter.
In addition, this runs the tests over the syntax and syntax_ml dicts that
were tested by individual functions, as part of the OO interface.
It also makes some checks on the raw buffer storage.
"""
def setUp(self):
self.isp = isp.IPythonInputSplitter()
def test_syntax(self):
"""Call all single-line syntax tests from the main object"""
isp = self.isp
for example in syntax.values():
for raw, out_t in example:
if raw.startswith(' '):
continue
isp.push(raw+'\n')
out_raw = isp.source_raw
out = isp.source_reset()
self.assertEqual(out.rstrip(), out_t,
tt.pair_fail_msg.format("inputsplitter",raw, out_t, out))
self.assertEqual(out_raw.rstrip(), raw.rstrip())
def test_syntax_multiline(self):
isp = self.isp
for example in syntax_ml.values():
for line_pairs in example:
out_t_parts = []
raw_parts = []
for lraw, out_t_part in line_pairs:
if out_t_part is not None:
out_t_parts.append(out_t_part)
if lraw is not None:
isp.push(lraw)
raw_parts.append(lraw)
out_raw = isp.source_raw
out = isp.source_reset()
out_t = '\n'.join(out_t_parts).rstrip()
raw = '\n'.join(raw_parts).rstrip()
self.assertEqual(out.rstrip(), out_t)
self.assertEqual(out_raw.rstrip(), raw)
def test_syntax_multiline_cell(self):
isp = self.isp
for example in syntax_ml.values():
out_t_parts = []
for line_pairs in example:
raw = '\n'.join(r for r, _ in line_pairs if r is not None)
out_t = '\n'.join(t for _,t in line_pairs if t is not None)
out = isp.transform_cell(raw)
# Match ignoring trailing whitespace
self.assertEqual(out.rstrip(), out_t.rstrip())
def test_cellmagic_preempt(self):
isp = self.isp
for raw, name, line, cell in [
("%%cellm a\nIn[1]:", u'cellm', u'a', u'In[1]:'),
("%%cellm \nline\n>>> hi", u'cellm', u'', u'line\n>>> hi'),
(">>> %%cellm \nline\n>>> hi", u'cellm', u'', u'line\nhi'),
("%%cellm \n>>> hi", u'cellm', u'', u'>>> hi'),
("%%cellm \nline1\nline2", u'cellm', u'', u'line1\nline2'),
("%%cellm \nline1\\\\\nline2", u'cellm', u'', u'line1\\\\\nline2'),
]:
expected = "get_ipython().run_cell_magic(%r, %r, %r)" % (
name, line, cell
)
out = isp.transform_cell(raw)
self.assertEqual(out.rstrip(), expected.rstrip())
def test_multiline_passthrough(self):
isp = self.isp
class CommentTransformer(InputTransformer):
def __init__(self):
self._lines = []
def push(self, line):
self._lines.append(line + '#')
def reset(self):
text = '\n'.join(self._lines)
self._lines = []
return text
isp.physical_line_transforms.insert(0, CommentTransformer())
for raw, expected in [
("a=5", "a=5#"),
("%ls foo", "get_ipython().run_line_magic(%r, %r)" % (u'ls', u'foo#')),
("!ls foo\n%ls bar", "get_ipython().system(%r)\nget_ipython().run_line_magic(%r, %r)" % (
u'ls foo#', u'ls', u'bar#'
)),
("1\n2\n3\n%ls foo\n4\n5", "1#\n2#\n3#\nget_ipython().run_line_magic(%r, %r)\n4#\n5#" % (u'ls', u'foo#')),
]:
out = isp.transform_cell(raw)
self.assertEqual(out.rstrip(), expected.rstrip())
#-----------------------------------------------------------------------------
# Main - use as a script, mostly for developer experiments
#-----------------------------------------------------------------------------
if __name__ == '__main__':
# A simple demo for interactive experimentation. This code will not get
# picked up by any test suite.
from IPython.core.inputsplitter import IPythonInputSplitter
# configure here the syntax to use, prompt and whether to autoindent
#isp, start_prompt = InputSplitter(), '>>> '
isp, start_prompt = IPythonInputSplitter(), 'In> '
autoindent = True
#autoindent = False
try:
while True:
prompt = start_prompt
while isp.push_accepts_more():
indent = ' '*isp.get_indent_spaces()
if autoindent:
line = indent + input(prompt+indent)
else:
line = input(prompt)
isp.push(line)
prompt = '... '
# Here we just return input so we can use it in a test suite, but a
# real interpreter would instead send it for execution somewhere.
#src = isp.source; raise EOFError # dbg
raw = isp.source_raw
src = isp.source_reset()
print('Input source was:\n', src)
print('Raw source was:\n', raw)
except EOFError:
print('Bye')
# Tests for cell magics support
def test_last_blank():
nt.assert_false(isp.last_blank(''))
nt.assert_false(isp.last_blank('abc'))
nt.assert_false(isp.last_blank('abc\n'))
nt.assert_false(isp.last_blank('abc\na'))
nt.assert_true(isp.last_blank('\n'))
nt.assert_true(isp.last_blank('\n '))
nt.assert_true(isp.last_blank('abc\n '))
nt.assert_true(isp.last_blank('abc\n\n'))
nt.assert_true(isp.last_blank('abc\nd\n\n'))
nt.assert_true(isp.last_blank('abc\nd\ne\n\n'))
nt.assert_true(isp.last_blank('abc \n \n \n\n'))
def test_last_two_blanks():
nt.assert_false(isp.last_two_blanks(''))
nt.assert_false(isp.last_two_blanks('abc'))
nt.assert_false(isp.last_two_blanks('abc\n'))
nt.assert_false(isp.last_two_blanks('abc\n\na'))
nt.assert_false(isp.last_two_blanks('abc\n \n'))
nt.assert_false(isp.last_two_blanks('abc\n\n'))
nt.assert_true(isp.last_two_blanks('\n\n'))
nt.assert_true(isp.last_two_blanks('\n\n '))
nt.assert_true(isp.last_two_blanks('\n \n'))
nt.assert_true(isp.last_two_blanks('abc\n\n '))
nt.assert_true(isp.last_two_blanks('abc\n\n\n'))
nt.assert_true(isp.last_two_blanks('abc\n\n \n'))
nt.assert_true(isp.last_two_blanks('abc\n\n \n '))
nt.assert_true(isp.last_two_blanks('abc\n\n \n \n'))
nt.assert_true(isp.last_two_blanks('abc\nd\n\n\n'))
nt.assert_true(isp.last_two_blanks('abc\nd\ne\nf\n\n\n'))
class CellMagicsCommon(object):
def test_whole_cell(self):
src = "%%cellm line\nbody\n"
out = self.sp.transform_cell(src)
ref = u"get_ipython().run_cell_magic('cellm', 'line', 'body')\n"
nt.assert_equal(out, py3compat.u_format(ref))
def test_cellmagic_help(self):
self.sp.push('%%cellm?')
nt.assert_false(self.sp.push_accepts_more())
def tearDown(self):
self.sp.reset()
class CellModeCellMagics(CellMagicsCommon, unittest.TestCase):
sp = isp.IPythonInputSplitter(line_input_checker=False)
def test_incremental(self):
sp = self.sp
sp.push('%%cellm firstline\n')
nt.assert_true(sp.push_accepts_more()) #1
sp.push('line2\n')
nt.assert_true(sp.push_accepts_more()) #2
sp.push('\n')
# This should accept a blank line and carry on until the cell is reset
nt.assert_true(sp.push_accepts_more()) #3
def test_no_strip_coding(self):
src = '\n'.join([
'%%writefile foo.py',
'# coding: utf-8',
'print(u"üñîçø∂é")',
])
out = self.sp.transform_cell(src)
nt.assert_in('# coding: utf-8', out)
class LineModeCellMagics(CellMagicsCommon, unittest.TestCase):
sp = isp.IPythonInputSplitter(line_input_checker=True)
def test_incremental(self):
sp = self.sp
sp.push('%%cellm line2\n')
nt.assert_true(sp.push_accepts_more()) #1
sp.push('\n')
# In this case, a blank line should end the cell magic
nt.assert_false(sp.push_accepts_more()) #2
indentation_samples = [
('a = 1', 0),
('for a in b:', 4),
('def f():', 4),
('def f(): #comment', 4),
('a = ":#not a comment"', 0),
('def f():\n a = 1', 4),
('def f():\n return 1', 0),
('for a in b:\n'
' if a < 0:'
' continue', 3),
('a = {', 4),
('a = {\n'
' 1,', 5),
('b = """123', 0),
('', 0),
('def f():\n pass', 0),
('class Bar:\n def f():\n pass', 4),
('class Bar:\n def f():\n raise', 4),
]
def test_find_next_indent():
for code, exp in indentation_samples:
res = isp.find_next_indent(code)
msg = "{!r} != {!r} (expected)\n Code: {!r}".format(res, exp, code)
assert res == exp, msg
|
saydulk/newfies-dialer
|
refs/heads/develop
|
newfies/dnc/migrations/0001_initial.py
|
5
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DNC',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='Enter a DNC list name', max_length=50, null=True, verbose_name='name')),
('description', models.TextField(help_text='DNC notes', null=True, blank=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(related_name='DNC owner', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'dnc_list',
'verbose_name': 'Do Not Call list',
'verbose_name_plural': 'Do Not Call lists',
'permissions': (('view_dnc', 'can see Do Not Call list'),),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DNCContact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('phone_number', models.CharField(max_length=120, verbose_name='phone number', db_index=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('dnc', models.ForeignKey(verbose_name='Do Not Call List', to='dnc.DNC')),
],
options={
'db_table': 'dnc_contact',
'verbose_name': 'Do Not Call contact',
'verbose_name_plural': 'Do Not Call contacts',
'permissions': (('view_dnc_contact', 'can see Do Not Call contact'),),
},
bases=(models.Model,),
),
]
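# Hedged note (not part of the generated file): like any Django migration, this
# one is applied with the standard management command, e.g.
#   python manage.py migrate dnc 0001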
|
samueldotj/TeeRISC-Simulator
|
refs/heads/master
|
util/checkpoint-tester.py
|
63
|
#! /usr/bin/env python
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Steve Reinhardt
#
# Basic test script for checkpointing.
#
# Given an M5 command and an interval (in ticks), this script will:
# 1. Run the command, dumping periodic checkpoints at the given interval.
# 2. Rerun the command for each pair of adjacent checkpoints:
# a. Restore from checkpoint N
# b. Run until the timestamp of checkpoint N+1
# c. Dump a checkpoint and end the simulation
# d. Diff the new checkpoint with the original checkpoint N+1
#
# Note that '--' must be used to separate the script options from the
# M5 command line.
#
# Caveats:
#
# - This script relies on the checkpoint options implemented in
# configs/common/Simulation.py, so it works with commands based on
# the se.py and fs.py scripts in configs/example, but does not work
# directly with the existing regression tests.
# - Interleaving simulator and program output can cause discrepancies
# in the file position checkpoint information since different runs
# have different amounts of simulator output.
# - Probably lots more issues we don't even know about yet.
#
# Examples:
#
# util/checkpoint-tester.py -i 400000 -- build/ALPHA_SE/m5.opt \
# configs/example/se.py -c tests/test-progs/hello/bin/alpha/tru64/hello \
# --output=progout --errout=progerr
#
# util/checkpoint-tester.py -i 200000000000 -- build/ALPHA_FS/m5.opt \
# configs/example/fs.py --script tests/halt.sh
#
import os, sys, re
import subprocess
import optparse
parser = optparse.OptionParser()
parser.add_option('-i', '--interval', type='int')
parser.add_option('-d', '--directory', default='checkpoint-test')
(options, args) = parser.parse_args()
interval = options.interval
if os.path.exists(options.directory):
print 'Error: test directory', options.directory, 'exists'
print ' Tester needs to create directory from scratch'
sys.exit(1)
top_dir = options.directory
os.mkdir(top_dir)
cmd_echo = open(os.path.join(top_dir, 'command'), 'w')
print >>cmd_echo, ' '.join(sys.argv)
cmd_echo.close()
m5_binary = args[0]
options = args[1:]
initial_args = ['--take-checkpoints', '%d,%d' % (interval, interval)]
cptdir = os.path.join(top_dir, 'm5out')
print '===> Running initial simulation.'
subprocess.call([m5_binary] + ['-red', cptdir] + options + initial_args)
dirs = os.listdir(cptdir)
expr = re.compile('cpt\.([0-9]*)')
cpts = []
for dir in dirs:
match = expr.match(dir)
if match:
cpts.append(int(match.group(1)))
cpts.sort()
# We test by loading checkpoint N, simulating to (and dumping at)
# checkpoint N+1, then comparing the resulting checkpoint with the
# original checkpoint N+1. Thus the number of tests we can run is one
# less than the number of checkpoints.
for i in range(1, len(cpts)):
print '===> Running test %d of %d.' % (i, len(cpts)-1)
mydir = os.path.join(top_dir, 'test.%d' % i)
subprocess.call([m5_binary] + ['-red', mydir] + options + initial_args +
['--max-checkpoints' , '1', '--checkpoint-dir', cptdir,
'--checkpoint-restore', str(i)])
cpt_name = 'cpt.%d' % cpts[i]
diff_name = os.path.join(mydir, 'diffout')
diffout = open(diff_name, 'w')
subprocess.call(['diff', '-ru', '-I', '^##.*',
'%s/%s' % (cptdir, cpt_name),
'%s/%s' % (mydir, cpt_name)], stdout=diffout)
diffout.close()
# print out the diff
diffout = open(diff_name)
print diffout.read(),
diffout.close()
|
juandiegoag/Jugador-heur-stico-de-Tetris
|
refs/heads/master
|
Cuadricula.py
|
1
|
__author__ = 'rick9'
# -*- coding: utf-8 -*-
import pygame
import sys
from Movimientos import *
from Tablero import *
from Piezas import *
from Puntaje import *
from Jugadas import evaluarJugadas
from Evolucion import *
from random import randint
import time
import threading
# Constant definitions
# COLORS ---------------------------------------------------
# Backgrounds
NEGRO = (0, 0, 0)
BLANCO = (255, 255, 255)
GRIS = (6, 6, 6)
# Piece colors, one for each type
AZUL = (0, 0, 255)
ROJO = (255, 0, 0)
MAGENTA = (208, 32, 144)
CYAN = (0, 206, 209)
AMARILLO = (255, 255, 0)
LIMA = (50, 205, 50)
NARANJA = (255, 140, 0)
#DIMENSIONS ---------------------------------------------
# Set the LARGO (width) and ALTO (height) of each cell on the screen.
LARGO = 20
ALTO = 20
# Set the margin between cells.
MARGEN = 2
#INITIALIZATIONS ----------------------------------------
# Initialize pygame
pygame.init()
# Set the width and height of the screen
DIMENSION_VENTANA = [1032, 492]
pantalla = pygame.display.set_mode(DIMENSION_VENTANA)
# Set the window title.
pygame.display.set_caption("Tetris Algoritmo Genético")
# Clock that controls how often the screen is refreshed
reloj = pygame.time.Clock()
# Receives a matrix (list of lists) of integers and, based on those values,
# draws the board on each iteration using pygame's draw.rect method;
# the color of each cell depends on the type of piece on the board
def dibujar(grid, offset):
for fila in range(22):
for columna in range(10):
color = NEGRO
if grid[fila][columna] == 1:
color = CYAN
elif grid[fila][columna] == 2:
color = MAGENTA
elif grid[fila][columna] == 3:
color = ROJO
elif grid[fila][columna] == 4:
color = LIMA
elif grid[fila][columna] == 5:
color = AMARILLO
elif grid[fila][columna] == 6:
color = NARANJA
elif grid[fila][columna] == 7:
color = AZUL
if fila == 0 or fila == 1:
color = GRIS
pygame.draw.rect(pantalla,
color,
[(MARGEN+LARGO) * columna + MARGEN + 260 * offset,
(MARGEN+ALTO) * fila + MARGEN,
LARGO,
ALTO])
# Returns four new boards initialized to 0
def listaTableros():
lista = []
for x in range(4):
tablero = []
for fila in range(22):
tablero.append([])
for columna in range(10):
tablero[fila].append(0) # Add a cell
lista.append(tablero)
return lista
# Most important method: runs a single game of Tetris
def juego(grid, offset, gen, puntaje):
finCaida = False
# Generate a random Tetris piece
pieza = generarPieza(randint(1,7))
#Apply a random number of rotations to the piece
for x in range(0, randint(0,4)):
pieza = rotarDerecha(grid,22,10,pieza)
# Very important: evaluates the current piece and simulates the possible moves; the most effective one according to the current weight of each
# heuristic determines the number of shifts to perform
movimientos = evaluarJugadas(grid, pieza, gen)
# Positive values move the piece right, negative values move it left, 0 means no movement
if(movimientos[1] > 0):
for x in range(0, movimientos[1]):
desplazarDerecha(grid,pieza)
elif movimientos[1] < 0:
for x in range(0, - movimientos[1]):
desplazarIzquierda(grid, pieza)
# Reorder so the piece falls in the right order; otherwise it gets deformed
pieza.coordenadas = reordenar_para_abajo(pieza.coordenadas)
# Piece-drop loop: it ends when the piece touches the floor or collides with another piece
# On each iteration the grid is updated and redrawn
while not finCaida:
finCaida = caidaPieza(grid, 22, 10, pieza)
dibujar(grid, offset)
pygame.display.flip()
#Sets the speed of the animation
reloj.tick(20)
# Add to the score: 18 per piece plus 50 per full row in the grid
puntaje += incrementar(18, grid)
# If any line is full once the piece has landed, it is removed
eliminarLlenas(grid)
# If the board fills up, it is reset to all 0s for the next game
if(tableroLleno(grid)):
del grid[:]
for fila in range(22):
# Add an empty array that will hold each cell
# in this row
grid.append([])
for columna in range(10):
grid[fila].append(0) # Add a cell
# Most important method: initializes the global variables used by the threads
# and starts the threads
def main():
# Create the boards for each thread, 4 in total
tableros = listaTableros()
pantalla.fill(GRIS)
# Initialize a generation of random genes; each one is a weight for one heuristic
genes = listaGenes()
# Initialize the scores for each thread
puntajes = listaPuntajes()
# boolean that controls the main loop
corriendo = True
generaciones = 1
while corriendo: # Main loop
for evento in pygame.event.get(): # pygame events; not used, but they must be handled for the program to run
if evento.type == pygame.QUIT:
corriendo = False
# Create four threads so that each one handles one game in each generation
t1 = threading.Thread(target=juego, args=(tableros[0],0,genes[0],puntajes[0],))
t2 = threading.Thread(target=juego, args=(tableros[1],1,genes[1],puntajes[1],))
t3 = threading.Thread(target=juego, args=(tableros[2],2,genes[2],puntajes[2],))
t4 = threading.Thread(target=juego, args=(tableros[3],3,genes[3],puntajes[3],))
# Start the threads
t1.start()
t2.start()
t3.start()
t4.start()
# join each thread
t1.join(10)
t2.join(10)
t3.join(10)
t4.join(10)
# Based on the results of each run, a new generation of genes is created by modifying the "genes" list
# This "evolved" list is carried over to the next iteration
nuevaGeneracion(genes, puntajes)
generaciones += 1
#nuevaGeneracion(genes, puntajes)
main()
# Call to shut down pygame
pygame.quit()
|
jinnykoo/wuyisj
|
refs/heads/master
|
sites/sandbox/apps/gateway/forms.py
|
60
|
from django import forms
from django.contrib.auth.models import User
from oscar.apps.customer.utils import normalise_email
class GatewayForm(forms.Form):
email = forms.EmailField()
def clean_email(self):
email = normalise_email(self.cleaned_data['email'])
if User.objects.filter(email__iexact=email).exists():
raise forms.ValidationError(
"A user already exists with email %s" % email
)
return email
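# Hedged example (not part of the sandbox app): a sketch of how the form might be
# exercised in a view or test, assuming Django settings are configured and the
# auth app is installed. The email address is purely illustrative.
def _example_gateway_form_usage():
    form = GatewayForm(data={'email': 'new.user@example.com'})
    if form.is_valid():
        return form.cleaned_data['email']  # normalised and not yet registered
    return form.errors.get('email')  # duplicate address -> validation message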
|
cloudflare/phantomjs
|
refs/heads/master
|
src/breakpad/src/tools/gyp/test/hello/gyptest-disable-regyp.py
|
501
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that Makefiles don't get rebuilt when a source gyp file changes and
the disable_regeneration generator flag is set.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('hello.gyp', '-Gauto_regeneration=0')
test.build('hello.gyp', test.ALL)
test.run_built_executable('hello', stdout="Hello, world!\n")
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('hello.gyp', test.read('hello2.gyp'))
test.build('hello.gyp', test.ALL)
# Should still be the old executable, as regeneration was disabled.
test.run_built_executable('hello', stdout="Hello, world!\n")
test.pass_test()
|
nickeubank/python-igraph
|
refs/heads/master
|
igraph/test/games.py
|
2
|
import unittest
from igraph import *
class GameTests(unittest.TestCase):
def testGRG(self):
g = Graph.GRG(50, 0.2)
self.assertTrue(isinstance(g, Graph))
g = Graph.GRG(50, 0.2, True)
self.assertTrue(isinstance(g, Graph))
self.assertTrue("x" in g.vertex_attributes())
self.assertTrue("y" in g.vertex_attributes())
self.assertTrue(isinstance(Layout(zip(g.vs["x"], g.vs["y"])), Layout))
def testForestFire(self):
g=Graph.Forest_Fire(100, 0.1)
self.assertTrue(isinstance(g, Graph) and g.is_directed() == False)
g=Graph.Forest_Fire(100, 0.1, directed=True)
self.assertTrue(isinstance(g, Graph) and g.is_directed() == True)
def testRecentDegree(self):
g=Graph.Recent_Degree(100, 5, 10)
self.assertTrue(isinstance(g, Graph))
def testPreference(self):
g=Graph.Preference(100, [1, 1], [[1, 0], [0, 1]])
self.assertTrue(isinstance(g, Graph) and len(g.clusters()) == 2)
g=Graph.Preference(100, [1, 1], [[1, 0], [0, 1]], attribute="type")
l=g.vs.get_attribute_values("type")
self.assertTrue(min(l) == 0 and max(l) == 1)
def testAsymmetricPreference(self):
g=Graph.Asymmetric_Preference(100, [[0, 1], [1, 0]], [[0, 1], [1, 0]])
self.assertTrue(isinstance(g, Graph) and len(g.clusters()) == 2)
g=Graph.Asymmetric_Preference(100, [[0, 1], [1, 0]], [[1, 0], [0, 1]],\
attribute="type")
l=g.vs.get_attribute_values("type")
l1=[i[0] for i in l]
l2=[i[1] for i in l]
self.assertTrue(min(l1) == 0 and max(l1) == 1 and
min(l2) == 0 and max(l2) == 1)
g=Graph.Asymmetric_Preference(100, [[0, 1], [1, 0]], [[1, 0], [0, 1]])
self.assertTrue(isinstance(g, Graph) and len(g.clusters()) == 1)
def testWattsStrogatz(self):
g=Graph.Watts_Strogatz(1, 20, 1, 0.2)
self.assertTrue(isinstance(g, Graph) and g.vcount()==20 and g.ecount()==20)
def testRandomBipartiteNP(self):
# Test np mode, undirected
g = Graph.Random_Bipartite(10, 20, p=0.25)
self.assertTrue(g.is_simple())
self.assertTrue(g.is_bipartite())
self.assertFalse(g.is_directed())
self.assertEqual([False]*10 + [True]*20, g.vs["type"])
# Test np mode, directed, "out"
g = Graph.Random_Bipartite(10, 20, p=0.25, directed=True, neimode="out")
self.assertTrue(g.is_simple())
self.assertTrue(g.is_bipartite())
self.assertTrue(g.is_directed())
self.assertEqual([False]*10 + [True]*20, g.vs["type"])
self.assertTrue(all(g.vs[e.tuple]["type"] == [False, True] for e in g.es))
# Test np mode, directed, "in"
g = Graph.Random_Bipartite(10, 20, p=0.25, directed=True, neimode="in")
self.assertTrue(g.is_simple())
self.assertTrue(g.is_bipartite())
self.assertTrue(g.is_directed())
self.assertEqual([False]*10 + [True]*20, g.vs["type"])
self.assertTrue(all(g.vs[e.tuple]["type"] == [True, False] for e in g.es))
# Test np mode, directed, "all"
g = Graph.Random_Bipartite(10, 20, p=0.25, directed=True, neimode="all")
self.assertTrue(g.is_simple())
self.assertTrue(g.is_bipartite())
self.assertTrue(g.is_directed())
self.assertEqual([False]*10 + [True]*20, g.vs["type"])
def testRandomBipartiteNM(self):
# Test np mode, undirected
g = Graph.Random_Bipartite(10, 20, m=50)
self.assertTrue(g.is_simple())
self.assertTrue(g.is_bipartite())
self.assertFalse(g.is_directed())
self.assertEqual(50, g.ecount())
self.assertEqual([False]*10 + [True]*20, g.vs["type"])
# Test np mode, directed, "out"
g = Graph.Random_Bipartite(10, 20, m=50, directed=True, neimode="out")
self.assertTrue(g.is_simple())
self.assertTrue(g.is_bipartite())
self.assertTrue(g.is_directed())
self.assertEqual(50, g.ecount())
self.assertEqual([False]*10 + [True]*20, g.vs["type"])
self.assertTrue(all(g.vs[e.tuple]["type"] == [False, True] for e in g.es))
# Test np mode, directed, "in"
g = Graph.Random_Bipartite(10, 20, m=50, directed=True, neimode="in")
self.assertTrue(g.is_simple())
self.assertTrue(g.is_bipartite())
self.assertTrue(g.is_directed())
self.assertEqual(50, g.ecount())
self.assertEqual([False]*10 + [True]*20, g.vs["type"])
self.assertTrue(all(g.vs[e.tuple]["type"] == [True, False] for e in g.es))
# Test np mode, directed, "all"
g = Graph.Random_Bipartite(10, 20, m=50, directed=True, neimode="all")
self.assertTrue(g.is_simple())
self.assertTrue(g.is_bipartite())
self.assertTrue(g.is_directed())
self.assertEqual(50, g.ecount())
self.assertEqual([False]*10 + [True]*20, g.vs["type"])
def testRewire(self):
# Undirected graph
g=Graph.GRG(25, 0.4)
degrees=g.degree()
# Rewiring without loops
g.rewire(10000)
self.assertEqual(degrees, g.degree())
self.assertTrue(g.is_simple())
# Rewiring with loops (1)
g.rewire(10000, mode="loops")
self.assertEqual(degrees, g.degree())
self.assertFalse(any(g.is_multiple()))
# Rewiring with loops (2)
g = Graph.Full(4)
g[1,3] = 0
degrees = g.degree()
g.rewire(100, mode="loops")
self.assertEqual(degrees, g.degree())
self.assertFalse(any(g.is_multiple()))
# Directed graph
g=Graph.GRG(25, 0.4)
g.to_directed("mutual")
indeg, outdeg = g.indegree(), g.outdegree()
g.rewire(10000)
self.assertEqual(indeg, g.indegree())
self.assertEqual(outdeg, g.outdegree())
self.assertTrue(g.is_simple())
# Directed graph with loops
g.rewire(10000, mode="loops")
self.assertEqual(indeg, g.indegree())
self.assertEqual(outdeg, g.outdegree())
self.assertFalse(any(g.is_multiple()))
def suite():
game_suite = unittest.makeSuite(GameTests)
return unittest.TestSuite([game_suite])
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
NexusIS/libcloud
|
refs/heads/trunk
|
docs/examples/dns/zonomi/instantiate_driver.py
|
37
|
from libcloud.dns.types import Provider
from libcloud.dns.providers import get_driver
cls = get_driver(Provider.ZONOMI)
driver = cls('apikey')
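# Hedged follow-up (not part of the original snippet): once the driver exists,
# the generic libcloud DNS API applies; listing zones is the usual first call.
# Note that 'apikey' above is a placeholder and this call hits the network.
zones = driver.list_zones()
for zone in zones:
    print(zone.domain)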
|
Telestream/telestream-cloud-python-sdk
|
refs/heads/master
|
telestream_cloud_qc_sdk/telestream_cloud_qc/models/must_or_must_not.py
|
1
|
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class MustOrMustNot(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
MUST = "Must"
MUSTNOT = "MustNot"
allowable_values = [MUST, MUSTNOT] # noqa: E501
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""MustOrMustNot - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MustOrMustNot):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MustOrMustNot):
return True
return self.to_dict() != other.to_dict()
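# Hedged usage sketch (not produced by the OpenAPI generator): the class acts as
# a thin enum holder, so callers typically pass one of the allowed values and can
# validate against `allowable_values`.
def _example_must_or_must_not():  # illustrative helper, not part of the SDK
    value = MustOrMustNot.MUST
    assert value in MustOrMustNot.allowable_values
    return value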
|
pipermerriam/django
|
refs/heads/master
|
tests/unmanaged_models/tests.py
|
296
|
from __future__ import unicode_literals
from django.db import connection
from django.test import TestCase
from .models import A01, A02, B01, B02, C01, C02, Managed1, Unmanaged2
class SimpleTests(TestCase):
def test_simple(self):
"""
The main test here is that all the models can be created without
any database errors. We can also do some more simple insertion and
lookup tests whilst we're here to show that the second set of models
does refer to the tables from the first set.
"""
# Insert some data into one set of models.
a = A01.objects.create(f_a="foo", f_b=42)
B01.objects.create(fk_a=a, f_a="fred", f_b=1729)
c = C01.objects.create(f_a="barney", f_b=1)
c.mm_a = [a]
# ... and pull it out via the other set.
a2 = A02.objects.all()[0]
self.assertIsInstance(a2, A02)
self.assertEqual(a2.f_a, "foo")
b2 = B02.objects.all()[0]
self.assertIsInstance(b2, B02)
self.assertEqual(b2.f_a, "fred")
self.assertIsInstance(b2.fk_a, A02)
self.assertEqual(b2.fk_a.f_a, "foo")
self.assertEqual(list(C02.objects.filter(f_a=None)), [])
resp = list(C02.objects.filter(mm_a=a.id))
self.assertEqual(len(resp), 1)
self.assertIsInstance(resp[0], C02)
self.assertEqual(resp[0].f_a, 'barney')
class ManyToManyUnmanagedTests(TestCase):
def test_many_to_many_between_unmanaged(self):
"""
The intermediary table between two unmanaged models should not be created.
"""
table = Unmanaged2._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assertNotIn(table, tables, "Table '%s' should not exist, but it does." % table)
def test_many_to_many_between_unmanaged_and_managed(self):
"""
An intermediary table between a managed and an unmanaged model should be created.
"""
table = Managed1._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assertIn(table, tables, "Table '%s' does not exist." % table)
|
vivekanand1101/anitya
|
refs/heads/master
|
runserver.py
|
4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" The flask application """
## These two lines are needed to run on EL6
__requires__ = ['SQLAlchemy >= 0.7', 'jinja2 >= 2.4']
import pkg_resources
from anitya.app import APP
if __name__ == '__main__':
APP.debug = True
APP.run()
|
CamelBackNotation/CarnotKE
|
refs/heads/master
|
jyhton/out/production/jyhton/test376.py
|
13
|
"""
[ 631035 ] Negative repeat cause java exception.
"""
import support
assert "0"*-1 == ""
|
vriera/micropython
|
refs/heads/master
|
tests/basics/bool1.py
|
77
|
# tests for bool objects
# basic logic
print(not False)
print(not True)
print(False and True)
print(False or True)
# unary operators
print(+True)
print(-True)
# unsupported unary op
try:
len(False)
except TypeError:
print('TypeError')
|
pradyunsg/pip
|
refs/heads/update-to-libera
|
src/pip/_internal/operations/build/wheel.py
|
6
|
import logging
import os
from typing import Optional
from pip._vendor.pep517.wrappers import Pep517HookCaller
from pip._internal.utils.subprocess import runner_with_spinner_message
logger = logging.getLogger(__name__)
def build_wheel_pep517(
name, # type: str
backend, # type: Pep517HookCaller
metadata_directory, # type: str
tempd, # type: str
):
# type: (...) -> Optional[str]
"""Build one InstallRequirement using the PEP 517 build process.
Returns path to wheel if successfully built. Otherwise, returns None.
"""
assert metadata_directory is not None
try:
logger.debug('Destination directory: %s', tempd)
runner = runner_with_spinner_message(
f'Building wheel for {name} (PEP 517)'
)
with backend.subprocess_runner(runner):
wheel_name = backend.build_wheel(
tempd,
metadata_directory=metadata_directory,
)
except Exception:
logger.error('Failed building wheel for %s', name)
return None
return os.path.join(tempd, wheel_name)
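# Hedged sketch (not part of pip itself): one way a caller might drive the helper
# above. The source path, backend name and directories are illustrative
# placeholders, not values pip actually uses.
def _example_build_wheel_pep517():
    # A hook caller needs the project source dir and the build backend declared
    # in its pyproject.toml; 'setuptools.build_meta' is just a common example.
    backend = Pep517HookCaller('/path/to/src', 'setuptools.build_meta')
    return build_wheel_pep517(
        name='example-pkg',
        backend=backend,
        metadata_directory='/tmp/example-metadata',
        tempd='/tmp/example-wheel-build',
    )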
|
rghe/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/k8s/inventory.py
|
7
|
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from ansible.module_utils.k8s.common import K8sAnsibleMixin, HAS_K8S_MODULE_HELPER
try:
from ansible.errors import AnsibleError
except ImportError:
AnsibleError = Exception
try:
from openshift.dynamic.exceptions import DynamicApiError
except ImportError:
pass
class K8sInventoryException(Exception):
pass
class K8sInventoryHelper(K8sAnsibleMixin):
helper = None
transport = 'kubectl'
def setup(self, config_data, cache, cache_key):
connections = config_data.get('connections')
if not HAS_K8S_MODULE_HELPER:
raise K8sInventoryException(
"This module requires the OpenShift Python client. Try `pip install openshift`"
)
source_data = None
if cache and cache_key in self._cache:
try:
source_data = self._cache[cache_key]
except KeyError:
pass
if not source_data:
self.fetch_objects(connections)
def fetch_objects(self, connections):
client = self.get_api_client()
if connections:
if not isinstance(connections, list):
raise K8sInventoryException("Expecting connections to be a list.")
for connection in connections:
if not isinstance(connection, dict):
raise K8sInventoryException("Expecting connection to be a dictionary.")
client = self.get_api_client(**connection)
name = connection.get('name', self.get_default_host_name(client.configuration.host))
if connection.get('namespaces'):
namespaces = connection['namespaces']
else:
namespaces = self.get_available_namespaces(client)
for namespace in namespaces:
self.get_pods_for_namespace(client, name, namespace)
self.get_services_for_namespace(client, name, namespace)
else:
name = self.get_default_host_name(client.configuration.host)
namespaces = self.get_available_namespaces(client)
for namespace in namespaces:
self.get_pods_for_namespace(client, name, namespace)
self.get_services_for_namespace(client, name, namespace)
@staticmethod
def get_default_host_name(host):
return host.replace('https://', '').replace('http://', '').replace('.', '-').replace(':', '_')
def get_available_namespaces(self, client):
v1_namespace = client.resources.get(api_version='v1', kind='Namespace')
try:
obj = v1_namespace.get()
except DynamicApiError as exc:
raise K8sInventoryException('Error fetching Namespace list: {0}'.format(exc.message))
return [namespace.metadata.name for namespace in obj.items]
def get_pods_for_namespace(self, client, name, namespace):
v1_pod = client.resources.get(api_version='v1', kind='Pod')
try:
obj = v1_pod.get(namespace=namespace)
except DynamicApiError as exc:
raise K8sInventoryException('Error fetching Pod list: {0}'.format(exc.message))
namespace_group = 'namespace_{0}'.format(namespace)
namespace_pods_group = '{0}_pods'.format(namespace_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
self.inventory.add_child(name, namespace_group)
self.inventory.add_group(namespace_pods_group)
self.inventory.add_child(namespace_group, namespace_pods_group)
for pod in obj.items:
pod_name = pod.metadata.name
pod_groups = []
pod_labels = {} if not pod.metadata.labels else pod.metadata.labels
pod_annotations = {} if not pod.metadata.annotations else pod.metadata.annotations
if pod.metadata.labels:
pod_labels = pod.metadata.labels
# create a group for each label_value
for key, value in pod.metadata.labels:
group_name = 'label_{0}_{1}'.format(key, value)
if group_name not in pod_groups:
pod_groups.append(group_name)
self.inventory.add_group(group_name)
for container in pod.status.containerStatuses:
# add each pod_container to the namespace group, and to each label_value group
container_name = '{0}_{1}'.format(pod.metadata.name, container.name)
self.inventory.add_host(container_name)
self.inventory.add_child(namespace_pods_group, container_name)
if pod_groups:
for group in pod_groups:
self.inventory.add_child(group, container_name)
# Add hostvars
self.inventory.set_variable(container_name, 'object_type', 'pod')
self.inventory.set_variable(container_name, 'labels', pod_labels)
self.inventory.set_variable(container_name, 'annotations', pod_annotations)
self.inventory.set_variable(container_name, 'cluster_name', pod.metadata.clusterName)
self.inventory.set_variable(container_name, 'pod_node_name', pod.spec.nodeName)
self.inventory.set_variable(container_name, 'pod_name', pod.spec.name)
self.inventory.set_variable(container_name, 'pod_host_ip', pod.status.hostIP)
self.inventory.set_variable(container_name, 'pod_phase', pod.status.phase)
self.inventory.set_variable(container_name, 'pod_ip', pod.status.podIP)
self.inventory.set_variable(container_name, 'pod_self_link', pod.metadata.selfLink)
self.inventory.set_variable(container_name, 'pod_resource_version', pod.metadata.resourceVersion)
self.inventory.set_variable(container_name, 'pod_uid', pod.metadata.uid)
self.inventory.set_variable(container_name, 'container_name', container.image)
self.inventory.set_variable(container_name, 'container_image', container.image)
if container.state.running:
self.inventory.set_variable(container_name, 'container_state', 'Running')
if container.state.terminated:
self.inventory.set_variable(container_name, 'container_state', 'Terminated')
if container.state.waiting:
self.inventory.set_variable(container_name, 'container_state', 'Waiting')
self.inventory.set_variable(container_name, 'container_ready', container.ready)
self.inventory.set_variable(container_name, 'ansible_remote_tmp', '/tmp/')
self.inventory.set_variable(container_name, 'ansible_connection', self.transport)
self.inventory.set_variable(container_name, 'ansible_{0}_pod'.format(self.transport),
pod_name)
self.inventory.set_variable(container_name, 'ansible_{0}_container'.format(self.transport),
container.name)
self.inventory.set_variable(container_name, 'ansible_{0}_namespace'.format(self.transport),
namespace)
def get_services_for_namespace(self, client, name, namespace):
v1_service = client.resources.get(api_version='v1', kind='Service')
try:
obj = v1_service.get(namespace=namespace)
except DynamicApiError as exc:
raise K8sInventoryException('Error fetching Service list: {0}'.format(exc.message))
namespace_group = 'namespace_{0}'.format(namespace)
namespace_services_group = '{0}_services'.format(namespace_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
self.inventory.add_child(name, namespace_group)
self.inventory.add_group(namespace_services_group)
self.inventory.add_child(namespace_group, namespace_services_group)
for service in obj.items:
service_name = service.metadata.name
service_labels = {} if not service.metadata.labels else service.metadata.labels
service_annotations = {} if not service.metadata.annotations else service.metadata.annotations
self.inventory.add_host(service_name)
if service.metadata.labels:
# create a group for each label_value
for key, value in service.metadata.labels:
group_name = 'label_{0}_{1}'.format(key, value)
self.inventory.add_group(group_name)
self.inventory.add_child(group_name, service_name)
try:
self.inventory.add_child(namespace_services_group, service_name)
except AnsibleError as e:
raise
ports = [{'name': port.name,
'port': port.port,
'protocol': port.protocol,
'targetPort': port.targetPort,
'nodePort': port.nodePort} for port in service.spec.ports or []]
# add hostvars
self.inventory.set_variable(service_name, 'object_type', 'service')
self.inventory.set_variable(service_name, 'labels', service_labels)
self.inventory.set_variable(service_name, 'annotations', service_annotations)
self.inventory.set_variable(service_name, 'cluster_name', service.metadata.clusterName)
self.inventory.set_variable(service_name, 'ports', ports)
self.inventory.set_variable(service_name, 'type', service.spec.type)
self.inventory.set_variable(service_name, 'self_link', service.metadata.selfLink)
self.inventory.set_variable(service_name, 'resource_version', service.metadata.resourceVersion)
self.inventory.set_variable(service_name, 'uid', service.metadata.uid)
if service.spec.externalTrafficPolicy:
self.inventory.set_variable(service_name, 'external_traffic_policy',
service.spec.externalTrafficPolicy)
if service.spec.externalIPs:
self.inventory.set_variable(service_name, 'external_ips', service.spec.externalIPs)
if service.spec.externalName:
self.inventory.set_variable(service_name, 'external_name', service.spec.externalName)
if service.spec.healthCheckNodePort:
self.inventory.set_variable(service_name, 'health_check_node_port',
service.spec.healthCheckNodePort)
if service.spec.loadBalancerIP:
self.inventory.set_variable(service_name, 'load_balancer_ip',
service.spec.loadBalancerIP)
if service.spec.selector:
self.inventory.set_variable(service_name, 'selector', service.spec.selector)
if hasattr(service.status.loadBalancer, 'ingress') and service.status.loadBalancer.ingress:
load_balancer = [{'hostname': ingress.hostname,
'ip': ingress.ip} for ingress in service.status.loadBalancer.ingress]
self.inventory.set_variable(service_name, 'load_balancer', load_balancer)
class OpenShiftInventoryHelper(K8sInventoryHelper):
helper = None
transport = 'oc'
def fetch_objects(self, connections):
super(OpenShiftInventoryHelper, self).fetch_objects(connections)
client = self.get_api_client()
if connections:
for connection in connections:
client = self.get_api_client(**connection)
name = connection.get('name', self.get_default_host_name(client.configuration.host))
if connection.get('namespaces'):
namespaces = connection['namespaces']
else:
namespaces = self.get_available_namespaces(client)
for namespace in namespaces:
self.get_routes_for_namespace(client, name, namespace)
else:
name = self.get_default_host_name(client.configuration.host)
namespaces = self.get_available_namespaces(client)
for namespace in namespaces:
self.get_routes_for_namespace(client, name, namespace)
def get_routes_for_namespace(self, client, name, namespace):
v1_route = client.resources.get(api_version='v1', kind='Route')
try:
obj = v1_route.get(namespace=namespace)
except DynamicApiError as exc:
raise K8sInventoryException('Error fetching Routes list: {0}'.format(exc.message))
namespace_group = 'namespace_{0}'.format(namespace)
namespace_routes_group = '{0}_routes'.format(namespace_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
self.inventory.add_child(name, namespace_group)
self.inventory.add_group(namespace_routes_group)
self.inventory.add_child(namespace_group, namespace_routes_group)
for route in obj.items:
route_name = route.metadata.name
route_labels = {} if not route.metadata.labels else route.metadata.labels
route_annotations = {} if not route.metadata.annotations else route.metadata.annotations
self.inventory.add_host(route_name)
if route.metadata.labels:
# create a group for each label_value
for key, value in route.metadata.labels:
group_name = 'label_{0}_{1}'.format(key, value)
self.inventory.add_group(group_name)
self.inventory.add_child(group_name, route_name)
self.inventory.add_child(namespace_routes_group, route_name)
# add hostvars
self.inventory.set_variable(route_name, 'labels', route_labels)
self.inventory.set_variable(route_name, 'annotations', route_annotations)
self.inventory.set_variable(route_name, 'cluster_name', route.metadata.clusterName)
self.inventory.set_variable(route_name, 'object_type', 'route')
self.inventory.set_variable(route_name, 'self_link', route.metadata.selfLink)
self.inventory.set_variable(route_name, 'resource_version', route.metadata.resourceVersion)
self.inventory.set_variable(route_name, 'uid', route.metadata.uid)
if route.spec.host:
self.inventory.set_variable(route_name, 'host', route.spec.host)
if route.spec.path:
self.inventory.set_variable(route_name, 'path', route.spec.path)
if hasattr(route.spec.port, 'targetPort') and route.spec.port.targetPort:
self.inventory.set_variable(route_name, 'port', route.spec.port)
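# Hedged illustration (not part of the module): the `connections` structure the
# helpers above consume is a list of dicts, normally supplied by the inventory
# plugin's YAML configuration. Keys match what fetch_objects() reads, e.g.:
#
#   connections:
#     - name: my-cluster        # optional; defaults to a name derived from the API host
#       namespaces:             # optional; defaults to every namespace the client can see
#         - default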
|
gmcquillan/hyde
|
refs/heads/master
|
hyde.py
|
39
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import optparse
import os
import sys
import hydeengine
PROG_ROOT = os.path.dirname(os.path.realpath( __file__ ))
def main():
parser = optparse.OptionParser(usage="%prog [-f] [-q]", version="%prog 0.5.3")
parser.add_option("-s", "--sitepath",
dest = "site_path",
help = "Change the path of the site folder.")
parser.add_option("-i", "--init", action = 'store_true',
dest = "init", default = False,
help = "Create a new hyde site.")
parser.add_option("-f", "--force", action = 'store_true',
dest = "force_init", default = False, help = "")
parser.add_option("-t", "--template",
dest = "template",
help = "Choose which template you want to use.")
parser.add_option("-g", "--generate", action = "store_true",
dest = "generate", default = False,
help = "Generate the source for your hyde site.")
parser.add_option("-k", "--keep_watching", action = "store_true",
dest = "keep_watching", default = False,
help = "Start monitoring the source folder for changes.")
parser.add_option("-d", "--deploy_to",
dest = "deploy_to",
help = "Change the path of the deploy folder.")
parser.add_option("-w", "--webserve", action = "store_true",
dest = "webserve", default = False,
help = "Start serving using a webserver.")
parser.add_option("--web-flavor", metavar='NAME', default="CherryPy", help="Specify the flavor of the server (CherryPy, gevent)")
parser.add_option("--settings", default="settings", help="Specify the settings file name to be used")
parser.add_option("-p", "--port",
dest = "port", default=8080,
type='int',
help = "Port webserver should listen on (8080).")
parser.add_option("-a", "--address",
dest = "address", default='localhost',
help = "Address webserver should listen on (localhost).")
(options, args) = parser.parse_args()
if len(args):
parser.error("Unexpected arguments encountered.")
if options.webserve:
servers = {'cherrypy': hydeengine.Server,
'gevent': hydeengine.GeventServer}
Server = servers.get(options.web_flavor.lower())
if not Server:
parser.error('Invalid web service flavor "%s" (valid: %s)' % \
(options.web_flavor, ', '.join(servers.keys())))
if not options.site_path:
options.site_path = os.getcwdu()
if options.deploy_to:
options.deploy_to = os.path.abspath(options.deploy_to)
if options.init:
initializer = hydeengine.Initializer(options.site_path)
initializer.initialize(PROG_ROOT,
options.template, options.force_init)
generator = None
server = None
def quit(*args, **kwargs):
if server and server.alive:
server.quit()
if generator:
generator.quit()
if options.generate:
generator = hydeengine.Generator(options.site_path)
generator.generate(options.deploy_to, options.keep_watching, quit, options.settings)
if options.webserve:
server = Server(options.site_path, address=options.address, port=options.port)
server.serve(options.deploy_to, quit, options.settings)
if ((options.generate and options.keep_watching)
or
options.webserve):
try:
print "Letting the server and/or the generator do their thing..."
if server:
server.block()
if generator:
generator.quit()
elif generator:
generator.block()
except:
print sys.exc_info()
quit()
if len(sys.argv) == 1:
print parser.format_option_help()
if __name__ == "__main__":
main()
# import cProfile
# cProfile.run('main(sys.argv[1:])', filename='hyde.cprof')
# import pstats
# stats = pstats.Stats('hyde.cprof')
# stats.strip_dirs().sort_stats('time').print_stats(20)
|
nerith/servo
|
refs/heads/master
|
tests/wpt/harness/wptrunner/hosts.py
|
196
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
class HostsLine(object):
def __init__(self, ip_address, canonical_hostname, aliases=None, comment=None):
self.ip_address = ip_address
self.canonical_hostname = canonical_hostname
self.aliases = aliases if aliases is not None else []
self.comment = comment
if self.ip_address is None:
assert self.canonical_hostname is None
assert not self.aliases
assert self.comment is not None
@classmethod
def from_string(cls, line):
if not line.strip():
return
line = line.strip()
ip_address = None
canonical_hostname = None
aliases = []
comment = None
comment_parts = line.split("#", 1)
if len(comment_parts) > 1:
comment = comment_parts[1]
data = comment_parts[0].strip()
if data:
fields = data.split()
if len(fields) < 2:
raise ValueError("Invalid hosts line")
ip_address = fields[0]
canonical_hostname = fields[1]
aliases = fields[2:]
return cls(ip_address, canonical_hostname, aliases, comment)
class HostsFile(object):
def __init__(self):
self.data = []
self.by_hostname = {}
def set_host(self, host):
if host.canonical_hostname is None:
self.data.append(host)
elif host.canonical_hostname in self.by_hostname:
old_host = self.by_hostname[host.canonical_hostname]
old_host.ip_address = host.ip_address
old_host.aliases = host.aliases
old_host.comment = host.comment
else:
self.data.append(host)
self.by_hostname[host.canonical_hostname] = host
@classmethod
def from_file(cls, f):
rv = cls()
for line in f:
host = HostsLine.from_string(line)
if host is not None:
rv.set_host(host)
return rv
def to_string(self):
field_widths = [0, 0]
for line in self.data:
if line.ip_address is not None:
field_widths[0] = max(field_widths[0], len(line.ip_address))
field_widths[1] = max(field_widths[1], len(line.canonical_hostname))
lines = []
for host in self.data:
line = ""
if host.ip_address is not None:
ip_string = host.ip_address.ljust(field_widths[0])
hostname_str = host.canonical_hostname
if host.aliases:
hostname_str = "%s %s" % (hostname_str.ljust(field_widths[1]),
" ".join(host.aliases))
line = "%s %s" % (ip_string, hostname_str)
if host.comment:
if line:
line += " "
line += "#%s" % host.comment
lines.append(line)
lines.append("")
return "\n".join(lines)
def to_file(self, f):
f.write(self.to_string().encode("utf8"))
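# Hedged example (not part of the original module): a minimal parse/serialise
# round trip driven through an in-memory file-like object.
def _example_hosts_roundtrip():
    from io import StringIO
    source = StringIO("127.0.0.1 localhost loopback # local names\n")
    hosts = HostsFile.from_file(source)
    return hosts.to_string()  # columns re-aligned, aliases and comment preserved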
|
qisanstudio/qsapp-express
|
refs/heads/master
|
src/express/views/__init__.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import index
from . import account
from . import bill
from . import knowledge
__all__ = ['index', 'account', 'bill', 'knowledge']
|
ghmajx/asuswrt-merlin
|
refs/heads/374.43_2-update
|
release/src/router/samba-3.6.13/lib/subunit/runtests.py
|
35
|
#!/usr/bin/env python
# -*- Mode: python -*-
#
# Copyright (C) 2004 Canonical.com
# Author: Robert Collins <robert.collins@canonical.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import unittest
from subunit.tests.TestUtil import TestVisitor, TestSuite
import subunit
import sys
import os
import shutil
import logging
class ParameterisableTextTestRunner(unittest.TextTestRunner):
"""I am a TextTestRunner whose result class is
parameterisable without further subclassing"""
def __init__(self, **args):
unittest.TextTestRunner.__init__(self, **args)
self._resultFactory=None
def resultFactory(self, *args):
"""set or retrieve the result factory"""
if args:
self._resultFactory=args[0]
return self
if self._resultFactory is None:
self._resultFactory=unittest._TextTestResult
return self._resultFactory
def _makeResult(self):
return self.resultFactory()(self.stream, self.descriptions, self.verbosity)
class EarlyStoppingTextTestResult(unittest._TextTestResult):
"""I am a TextTestResult that can optionally stop at the first failure
or error"""
def addError(self, test, err):
unittest._TextTestResult.addError(self, test, err)
if self.stopOnError():
self.stop()
def addFailure(self, test, err):
unittest._TextTestResult.addError(self, test, err)
if self.stopOnFailure():
self.stop()
def stopOnError(self, *args):
"""should this result indicate an abort when an error occurs?
TODO parameterise this"""
return True
def stopOnFailure(self, *args):
"""should this result indicate an abort when a failure error occurs?
TODO parameterise this"""
return True
def earlyStopFactory(*args, **kwargs):
"""return a an early stopping text test result"""
result=EarlyStoppingTextTestResult(*args, **kwargs)
return result
class ShellTests(subunit.ExecTestCase):
def test_sourcing(self):
"""./shell/tests/test_source_library.sh"""
def test_functions(self):
"""./shell/tests/test_function_output.sh"""
def test_suite():
result = TestSuite()
result.addTest(subunit.test_suite())
result.addTest(ShellTests('test_sourcing'))
result.addTest(ShellTests('test_functions'))
return result
class filteringVisitor(TestVisitor):
"""I accrue all the testCases I visit that pass a regexp filter on id
into my suite
"""
def __init__(self, filter):
import re
TestVisitor.__init__(self)
self._suite=None
self.filter=re.compile(filter)
def suite(self):
"""answer the suite we are building"""
if self._suite is None:
self._suite=TestSuite()
return self._suite
def visitCase(self, aCase):
if self.filter.match(aCase.id()):
self.suite().addTest(aCase)
def main(argv):
"""To parameterise what tests are run, run this script like so:
python test_all.py REGEX
i.e.
python test_all.py .*Protocol.*
to run all tests with Protocol in their id."""
if len(argv) > 1:
pattern = argv[1]
else:
pattern = ".*"
visitor = filteringVisitor(pattern)
test_suite().visit(visitor)
runner = ParameterisableTextTestRunner(verbosity=2)
runner.resultFactory(unittest._TextTestResult)
if not runner.run(visitor.suite()).wasSuccessful():
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
darribas/pysal
|
refs/heads/master
|
pysal/core/util/shapefile.py
|
13
|
"""
A Pure Python ShapeFile Reader and Writer
This module is self-contained and does not require pysal.
This module returns and expects dictionary-based data structures.
This module should be wrapped into your native data structures.
Contact:
Charles Schmidt
GeoDa Center
Arizona State University
Tempe, AZ
http://geodacenter.asu.edu
"""
__author__ = "Charles R Schmidt <schmidtc@gmail.com>"
from struct import calcsize, unpack, pack
from cStringIO import StringIO
from itertools import izip, islice
import array
import sys
if sys.byteorder == 'little':
SYS_BYTE_ORDER = '<'
else:
SYS_BYTE_ORDER = '>'
STRUCT_ITEMSIZE = {}
STRUCT_ITEMSIZE['i'] = calcsize('i')
STRUCT_ITEMSIZE['d'] = calcsize('d')
__all__ = ['shp_file', 'shx_file']
#SHAPEFILE Globals
def struct2arrayinfo(struct):
struct = list(struct)
names = [x[0] for x in struct]
types = [x[1] for x in struct]
orders = [x[2] for x in struct]
lname, ltype, lorder = struct.pop(0)
groups = {}
g = 0
groups[g] = {'names': [lname], 'size': STRUCT_ITEMSIZE[ltype],
'fmt': ltype, 'order': lorder}
while struct:
name, type, order = struct.pop(0)
if order == lorder:
groups[g]['names'].append(name)
groups[g]['size'] += STRUCT_ITEMSIZE[type]
groups[g]['fmt'] += type
else:
g += 1
groups[g] = {'names': [name], 'size': STRUCT_ITEMSIZE[
type], 'fmt': type, 'order': order}
lname, ltype, lorder = name, type, order
return [groups[x] for x in range(g + 1)]
HEADERSTRUCT = (
('File Code', 'i', '>'),
('Unused0', 'i', '>'),
('Unused1', 'i', '>'),
('Unused2', 'i', '>'),
('Unused3', 'i', '>'),
('Unused4', 'i', '>'),
('File Length', 'i', '>'),
('Version', 'i', '<'),
('Shape Type', 'i', '<'),
('BBOX Xmin', 'd', '<'),
('BBOX Ymin', 'd', '<'),
('BBOX Xmax', 'd', '<'),
('BBOX Ymax', 'd', '<'),
('BBOX Zmin', 'd', '<'),
('BBOX Zmax', 'd', '<'),
('BBOX Mmin', 'd', '<'),
('BBOX Mmax', 'd', '<'))
UHEADERSTRUCT = struct2arrayinfo(HEADERSTRUCT)
RHEADERSTRUCT = (
('Record Number', 'i', '>'),
('Content Length', 'i', '>'))
URHEADERSTRUCT = struct2arrayinfo(RHEADERSTRUCT)
def noneMax(a, b):
if a is None:
return b
if b is None:
return a
return max(a, b)
def noneMin(a, b):
if a is None:
return b
if b is None:
return a
return min(a, b)
def _unpackDict(structure, fileObj):
"""Utility Function, Requires a Tuple of tuples that desribes the element structure...
_unpackDict(structure tuple, fileObj file) -> dict
Arguments:
structure -- tuple of tuples -- (('FieldName 1','type','byteOrder'),('FieldName 2','type','byteOrder'))
fileObj -- file -- an open file at the correct position!
Returns:
{'FieldName 1': value, 'FieldName 2': value}
Side Effects:
#file at new position
Example:
>>> import pysal
>>> _unpackDict(UHEADERSTRUCT,open(pysal.examples.get_path('10740.shx'),'rb')) == {'BBOX Xmax': -105.29012, 'BBOX Ymax': 36.219799000000002, 'BBOX Mmax': 0.0, 'BBOX Zmin': 0.0, 'BBOX Mmin': 0.0, 'File Code': 9994, 'BBOX Ymin': 34.259672000000002, 'BBOX Xmin': -107.62651, 'Unused0': 0, 'Unused1': 0, 'Unused2': 0, 'Unused3': 0, 'Unused4': 0, 'Version': 1000, 'BBOX Zmax': 0.0, 'Shape Type': 5, 'File Length': 830}
True
"""
d = {}
for struct in structure:
items = unpack(struct['order'] + struct['fmt'],
fileObj.read(struct['size']))
for i, name in enumerate(struct['names']):
d[name] = items[i]
return d
def _unpackDict2(d, structure, fileObj):
"""Utility Function, used arrays instead from struct
Arguments:
d -- dict -- Dictionary to be updated.
structure -- tuple of tuples -- (('FieldName 1',('type',n),'byteOrder'),('FieldName 2',('type',n),'byteOrder'))
"""
for name, dtype, order in structure:
dtype, n = dtype
result = array.array(dtype)
result.fromstring(fileObj.read(result.itemsize * n))
if order != SYS_BYTE_ORDER:
result.byteswap()
d[name] = result.tolist()
return d
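# Illustrative sketch (hypothetical data; assumes a little-endian host so no
# byteswap is needed): read two little-endian doubles into the key 'Vertices'.
# buf = StringIO(pack('<2d', 1.0, 2.0))
# _unpackDict2({}, (('Vertices', ('d', 2), '<'),), buf)  # -> {'Vertices': [1.0, 2.0]}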
def _packDict(structure, d):
"""Utility Function
_packDict(structure tuple, d dict) -> str
Arguments:
structure -- tuple of tuples -- (('FieldName 1','type','byteOrder'),('FieldName 2','type','byteOrder'))
d -- dict -- {'FieldName 1': value, 'FieldName 2': value}
Example:
>>> s = _packDict( (('FieldName 1','i','<'),('FieldName 2','i','<')), {'FieldName 1': 1, 'FieldName 2': 2} )
>>> s==pack('<ii',1,2)
True
>>> unpack('<ii',s)
(1, 2)
"""
string = b''
for name, dtype, order in structure:
if len(dtype) > 1:
string += pack(order + dtype, *d[name])
else:
string += pack(order + dtype, d[name])
return string
class shp_file:
"""
Reads and Writes the SHP component of a ShapeFile
Attributes:
header -- dict -- Contents of the SHP header. #For contents see: HEADERSTRUCT
shape -- int -- ShapeType.
Notes: The headers of the SHP and SHX files are identical.
"""
SHAPE_TYPES = {'POINT': 1, 'ARC': 3, 'POLYGON': 5, 'MULTIPOINT': 8, 'POINTZ': 11, 'ARCZ': 13, 'POLYGONZ': 15, 'MULTIPOINTZ': 18, 'POINTM': 21, 'ARCM': 23, 'POLYGONM': 25, 'MULTIPOINTM': 28, 'MULTIPATCH': 31}
def __iswritable(self):
try:
assert self.__mode == 'w'
except AssertionError:
raise IOError("[Errno 9] Bad file descriptor")
return True
def __isreadable(self):
try:
assert self.__mode == 'r'
except AssertionError:
raise IOError("[Errno 9] Bad file descriptor")
return True
def __init__(self, fileName, mode='r', shape_type=None):
self.__mode = mode
if fileName.lower().endswith('.shp') or fileName.lower().endswith('.shx') or fileName.lower().endswith('.dbf'):
fileName = fileName[:-4]
self.fileName = fileName
if mode == 'r':
self._open_shp_file()
elif mode == 'w':
if shape_type not in self.SHAPE_TYPES:
raise Exception('Attempt to create shp/shx file of invalid type')
self._create_shp_file(shape_type)
else:
raise Exception('Only "w" and "r" modes are supported')
def _open_shp_file(self):
"""
Opens a shp/shx file.
shp_file(fileName string, 'r') -> Shpfile
Arguments:
filename -- the name of the file to open
mode -- string -- 'r'
shape_type -- None
Example:
>>> import pysal
>>> shp = shp_file(pysal.examples.get_path('10740.shp'))
>>> shp.header == {'BBOX Xmax': -105.29012, 'BBOX Ymax': 36.219799000000002, 'BBOX Mmax': 0.0, 'BBOX Zmin': 0.0, 'BBOX Mmin': 0.0, 'File Code': 9994, 'BBOX Ymin': 34.259672000000002, 'BBOX Xmin': -107.62651, 'Unused0': 0, 'Unused1': 0, 'Unused2': 0, 'Unused3': 0, 'Unused4': 0, 'Version': 1000, 'BBOX Zmax': 0.0, 'Shape Type': 5, 'File Length': 260534}
True
>>> len(shp)
195
"""
self.__isreadable()
fileName = self.fileName
self.fileObj = open(fileName + '.shp', 'rb')
self._shx = shx_file(fileName)
self.header = _unpackDict(UHEADERSTRUCT, self.fileObj)
self.shape = TYPE_DISPATCH[self.header['Shape Type']]
self.__lastShape = 0
# localizing for convenience
self.__numRecords = self._shx.numRecords
# constructing bounding box from header
h = self.header
self.bbox = [h['BBOX Xmin'], h['BBOX Ymin'],
h['BBOX Xmax'], h['BBOX Ymax']]
self.shapeType = self.header['Shape Type']
def _create_shp_file(self, shape_type):
"""
Creates a shp/shx file.
shp_file(fileName string, 'w', shape_type string) -> Shpfile
Arguments:
filename -- the name of the file to create
mode -- string -- must be 'w'
shape_type -- string -- the type of shp/shx file to create. must be one of
the following: 'POINT', 'POINTZ', 'POINTM',
'ARC', 'ARCZ', 'ARCM', 'POLYGON', 'POLYGONZ', 'POLYGONM',
'MULTIPOINT', 'MULTIPOINTZ', 'MULTIPOINTM', 'MULTIPATCH'
Example:
>>> import pysal,os
>>> shp = shp_file('test','w','POINT')
>>> p = shp_file(pysal.examples.get_path('Point.shp'))
>>> for pt in p:
... shp.add_shape(pt)
...
>>> shp.close()
>>> open('test.shp','rb').read() == open(pysal.examples.get_path('Point.shp'),'rb').read()
True
>>> open('test.shx','rb').read() == open(pysal.examples.get_path('Point.shx'),'rb').read()
True
>>> os.remove('test.shx')
>>> os.remove('test.shp')
"""
self.__iswritable()
fileName = self.fileName
self.fileObj = open(fileName + '.shp', 'wb')
self._shx = shx_file(fileName, 'w')
self.header = {}
self.header['Shape Type'] = self.SHAPE_TYPES[shape_type]
self.header['Version'] = 1000
self.header['Unused0'] = 0
self.header['Unused1'] = 0
self.header['Unused2'] = 0
self.header['Unused3'] = 0
self.header['Unused4'] = 0
self.header['File Code'] = 9994
self.__file_Length = 100
self.header['File Length'] = 0
self.header['BBOX Xmax'] = None
self.header['BBOX Ymax'] = None
self.header['BBOX Mmax'] = None
self.header['BBOX Zmax'] = None
self.header['BBOX Xmin'] = None
self.header['BBOX Ymin'] = None
self.header['BBOX Mmin'] = None
self.header['BBOX Zmin'] = None
self.shape = TYPE_DISPATCH[self.header['Shape Type']]
#self.__numRecords = self._shx.numRecords
def __len__(self):
return self.__numRecords
def __iter__(self):
return self
def type(self):
return self.shape.String_Type
def next(self):
"""returns the next Shape in the shapeFile
Example:
>>> import pysal
>>> list(shp_file(pysal.examples.get_path('Point.shp'))) == [{'Y': -0.25904661905760773, 'X': -0.00068176617532103578, 'Shape Type': 1}, {'Y': -0.25630328607387354, 'X': 0.11697145363360706, 'Shape Type': 1}, {'Y': -0.33930131004366804, 'X': 0.05043668122270728, 'Shape Type': 1}, {'Y': -0.41266375545851519, 'X': -0.041266375545851552, 'Shape Type': 1}, {'Y': -0.44017467248908293, 'X': -0.011462882096069604, 'Shape Type': 1}, {'Y': -0.46080786026200882, 'X': 0.027510917030567628, 'Shape Type': 1}, {'Y': -0.45851528384279472, 'X': 0.075655021834060809, 'Shape Type': 1}, {'Y': -0.43558951965065495, 'X': 0.11233624454148461, 'Shape Type': 1}, {'Y': -0.40578602620087334, 'X': 0.13984716157205224, 'Shape Type': 1}]
True
"""
self.__isreadable()
nextShape = self.__lastShape
if nextShape == self._shx.numRecords:
self.__lastShape = 0
raise StopIteration
else:
self.__lastShape = nextShape + 1
return self.get_shape(nextShape)
def __seek(self, pos):
if pos != self.fileObj.tell():
self.fileObj.seek(pos)
def __read(self, pos, size):
self.__isreadable()
if pos != self.fileObj.tell():
self.fileObj.seek(pos)
return self.fileObj.read(size)
def get_shape(self, shpId):
self.__isreadable()
if shpId + 1 > self.__numRecords:
raise IndexError
fPosition, bytes = self._shx.index[shpId]
self.__seek(fPosition)
#the index length does not include the 8 byte record header (which contains the Record ID and Content Length)
rec_id, con_len = _unpackDict(URHEADERSTRUCT, self.fileObj)
return self.shape.unpack(StringIO(self.fileObj.read(bytes)))
#return self.shape.unpack(self.fileObj.read(bytes))
def __update_bbox(self, s):
h = self.header
if s.get('Shape Type') == 1:
h['BBOX Xmax'] = noneMax(h['BBOX Xmax'], s.get('X'))
h['BBOX Ymax'] = noneMax(h['BBOX Ymax'], s.get('Y'))
h['BBOX Mmax'] = noneMax(h['BBOX Mmax'], s.get('M'))
h['BBOX Zmax'] = noneMax(h['BBOX Zmax'], s.get('Z'))
h['BBOX Xmin'] = noneMin(h['BBOX Xmin'], s.get('X'))
h['BBOX Ymin'] = noneMin(h['BBOX Ymin'], s.get('Y'))
h['BBOX Mmin'] = noneMin(h['BBOX Mmin'], s.get('M'))
h['BBOX Zmin'] = noneMin(h['BBOX Zmin'], s.get('Z'))
else:
h['BBOX Xmax'] = noneMax(h['BBOX Xmax'], s.get('BBOX Xmax'))
h['BBOX Ymax'] = noneMax(h['BBOX Ymax'], s.get('BBOX Ymax'))
h['BBOX Mmax'] = noneMax(h['BBOX Mmax'], s.get('BBOX Mmax'))
h['BBOX Zmax'] = noneMax(h['BBOX Zmax'], s.get('BBOX Zmax'))
h['BBOX Xmin'] = noneMin(h['BBOX Xmin'], s.get('BBOX Xmin'))
h['BBOX Ymin'] = noneMin(h['BBOX Ymin'], s.get('BBOX Ymin'))
h['BBOX Mmin'] = noneMin(h['BBOX Mmin'], s.get('BBOX Mmin'))
h['BBOX Zmin'] = noneMin(h['BBOX Zmin'], s.get('BBOX Zmin'))
if not self.shape.HASM:
self.header['BBOX Mmax'] = 0.0
self.header['BBOX Mmin'] = 0.0
if not self.shape.HASZ:
self.header['BBOX Zmax'] = 0.0
self.header['BBOX Zmin'] = 0.0
def add_shape(self, s):
self.__iswritable()
self.__update_bbox(s)
rec = self.shape.pack(s)
con_len = len(rec)
self.__file_Length += con_len + 8
rec_id, pos = self._shx.add_record(con_len)
self.__seek(pos)
self.fileObj.write(pack('>ii', rec_id, con_len / 2))
self.fileObj.write(rec)
def close(self):
self._shx.close(self.header)
if self.__mode == 'w':
self.header['File Length'] = self.__file_Length / 2
self.__seek(0)
self.fileObj.write(_packDict(HEADERSTRUCT, self.header))
self.fileObj.close()
class shx_file:
"""
Reads and Writes the SHX component of a ShapeFile
Attributes:
index -- list -- Contains the file offset and length of each record in the SHP component
numRecords -- int -- Number of records
"""
def __iswritable(self):
try:
assert self.__mode == 'w'
except AssertionError:
raise IOError("[Errno 9] Bad file descriptor")
return True
def __isreadable(self):
try:
assert self.__mode == 'r'
except AssertionError:
raise IOError("[Errno 9] Bad file descriptor")
return True
def __init__(self, fileName=None, mode='r'):
self.__mode = mode
if fileName.endswith('.shp') or fileName.endswith('.shx') or fileName.endswith('.dbf'):
fileName = fileName[:-4]
self.fileName = fileName
if mode == 'r':
self._open_shx_file()
elif mode == 'w':
self._create_shx_file()
def _open_shx_file(self):
""" Opens the SHX file.
shx_file(filename,'r') --> shx_file
Arguments:
filename -- string -- extension is optional, will remove '.dbf','.shx','.shp' and append '.shx'
mode -- string -- Must be 'r'
Example:
>>> import pysal
>>> shx = shx_file(pysal.examples.get_path('10740'))
>>> shx._header == {'BBOX Xmax': -105.29012, 'BBOX Ymax': 36.219799000000002, 'BBOX Mmax': 0.0, 'BBOX Zmin': 0.0, 'BBOX Mmin': 0.0, 'File Code': 9994, 'BBOX Ymin': 34.259672000000002, 'BBOX Xmin': -107.62651, 'Unused0': 0, 'Unused1': 0, 'Unused2': 0, 'Unused3': 0, 'Unused4': 0, 'Version': 1000, 'BBOX Zmax': 0.0, 'Shape Type': 5, 'File Length': 830}
True
>>> len(shx.index)
195
"""
self.__isreadable()
self.fileObj = open(self.fileName + '.shx', 'rb')
self._header = _unpackDict(UHEADERSTRUCT, self.fileObj)
self.numRecords = numRecords = (self._header['File Length'] - 50) / 4
index = {}
fmt = '>%di' % (2 * numRecords)
size = calcsize(fmt)
dat = unpack(fmt, self.fileObj.read(size))
self.index = [(dat[i] * 2, dat[i + 1] * 2) for i in xrange(
0, len(dat), 2)]
def _create_shx_file(self):
""" Creates the SHX file.
shx_file(filename,'w') --> shx_file
Arguments:
filename -- string -- extension is optional, will remove '.dbf','.shx','.shp' and append '.shx'
mode -- string -- Must be 'w'
Example:
>>> import pysal
>>> shx = shx_file(pysal.examples.get_path('Point'))
>>> isinstance(shx,shx_file)
True
"""
self.__iswritable()
self.fileObj = open(self.fileName + '.shx', 'wb')
self.numRecords = 0
self.index = []
self.__offset = 100 # length of header
self.__next_rid = 1 # record IDs start at 1
def add_record(self, size):
""" Add a record to the shx index.
add_record(size int) --> RecordID int
Arguments:
size -- int -- the length of the record in bytes NOT including the 8byte record header
Returns:
rec_id -- int -- the sequential record ID, 1-based.
Note: the SHX records contain (Offset, Length) in 16-bit words.
Example:
>>> import pysal,os
>>> shx = shx_file(pysal.examples.get_path('Point'))
>>> shx.index
[(100, 20), (128, 20), (156, 20), (184, 20), (212, 20), (240, 20), (268, 20), (296, 20), (324, 20)]
>>> shx2 = shx_file('test','w')
>>> [shx2.add_record(rec[1]) for rec in shx.index]
[(1, 100), (2, 128), (3, 156), (4, 184), (5, 212), (6, 240), (7, 268), (8, 296), (9, 324)]
>>> shx2.index == shx.index
True
>>> shx2.close(shx._header)
>>> open('test.shx','rb').read() == open(pysal.examples.get_path('Point.shx'),'rb').read()
True
>>> os.remove('test.shx')
"""
self.__iswritable()
pos = self.__offset
rec_id = self.__next_rid
self.index.append((self.__offset, size))
self.__offset += size + 8 # the 8byte record Header.
self.numRecords += 1
self.__next_rid += 1
return rec_id, pos
def close(self, header):
if self.__mode == 'w':
self.__iswritable()
header['File Length'] = (
self.numRecords * calcsize('>ii') + 100) / 2
self.fileObj.seek(0)
self.fileObj.write(_packDict(HEADERSTRUCT, header))
fmt = '>%di' % (2 * self.numRecords)
values = []
for off, size in self.index:
values.extend([off / 2, size / 2])
self.fileObj.write(pack(fmt, *values))
self.fileObj.close()
class NullShape:
Shape_Type = 0
STRUCT = (('Shape Type', 'i', '<'),)
def unpack(self):
return None
def pack(self, x=None):
return pack('<i', 0)
class Point(object):
""" Packs and Unpacks a ShapeFile Point Type
Example:
>>> import pysal
>>> shp = shp_file(pysal.examples.get_path('Point.shp'))
>>> rec = shp.get_shape(0)
>>> rec == {'Y': -0.25904661905760773, 'X': -0.00068176617532103578, 'Shape Type': 1}
True
>>> pos = shp.fileObj.seek(shp._shx.index[0][0]+8) #+8 byte record header
>>> dat = shp.fileObj.read(shp._shx.index[0][1])
>>> dat == Point.pack(rec)
True
"""
Shape_Type = 1
String_Type = 'POINT'
HASZ = False
HASM = False
STRUCT = (('Shape Type', 'i', '<'),
('X', 'd', '<'),
('Y', 'd', '<'))
USTRUCT = [{'fmt': 'idd', 'order': '<', 'names': ['Shape Type',
'X', 'Y'], 'size': 20}]
@classmethod
def unpack(cls, dat):
return _unpackDict(cls.USTRUCT, dat)
@classmethod
def pack(cls, record):
rheader = _packDict(cls.STRUCT, record)
return rheader
class PointZ(Point):
Shape_Type = 11
String_Type = "POINTZ"
HASZ = True
HASM = True
STRUCT = (('Shape Type', 'i', '<'),
('X', 'd', '<'),
('Y', 'd', '<'),
('Z', 'd', '<'),
('M', 'd', '<'))
USTRUCT = [{'fmt': 'idddd', 'order': '<', 'names': ['Shape Type',
'X', 'Y', 'Z', 'M'], 'size': 36}]
class PolyLine:
""" Packs and Unpacks a ShapeFile PolyLine Type
Example:
>>> import pysal
>>> shp = shp_file(pysal.examples.get_path('Line.shp'))
>>> rec = shp.get_shape(0)
>>> rec == {'BBOX Ymax': -0.25832280562918325, 'NumPoints': 3, 'BBOX Ymin': -0.25895877033237352, 'NumParts': 1, 'Vertices': [(-0.0090539248870159517, -0.25832280562918325), (0.0074811573959305822, -0.25895877033237352), (0.0074811573959305822, -0.25895877033237352)], 'BBOX Xmax': 0.0074811573959305822, 'BBOX Xmin': -0.0090539248870159517, 'Shape Type': 3, 'Parts Index': [0]}
True
>>> pos = shp.fileObj.seek(shp._shx.index[0][0]+8) #+8 byte record header
>>> dat = shp.fileObj.read(shp._shx.index[0][1])
>>> dat == PolyLine.pack(rec)
True
"""
HASZ = False
HASM = False
String_Type = 'ARC'
STRUCT = (('Shape Type', 'i', '<'),
('BBOX Xmin', 'd', '<'),
('BBOX Ymin', 'd', '<'),
('BBOX Xmax', 'd', '<'),
('BBOX Ymax', 'd', '<'),
('NumParts', 'i', '<'),
('NumPoints', 'i', '<'))
USTRUCT = [{'fmt': 'iddddii', 'order': '<', 'names': ['Shape Type', 'BBOX Xmin', 'BBOX Ymin', 'BBOX Xmax', 'BBOX Ymax', 'NumParts', 'NumPoints'], 'size': 44}]
@classmethod
def unpack(cls, dat):
record = _unpackDict(cls.USTRUCT, dat)
contentStruct = (('Parts Index', ('i', record['NumParts']), '<'),
('Vertices', ('d', 2 * record['NumPoints']), '<'))
_unpackDict2(record, contentStruct, dat)
#record['Vertices'] = [(record['Vertices'][i],record['Vertices'][i+1]) for i in xrange(0,record['NumPoints']*2,2)]
verts = record['Vertices']
#Next line is equivalent to: zip(verts[::2],verts[1::2])
record['Vertices'] = list(izip(
islice(verts, 0, None, 2), islice(verts, 1, None, 2)))
if not record['Parts Index']:
record['Parts Index'] = [0]
return record
#partsIndex = list(partsIndex)
#partsIndex.append(None)
#parts = [vertices[partsIndex[i]:partsIndex[i+1]] for i in xrange(header['NumParts'])]
@classmethod
def pack(cls, record):
rheader = _packDict(cls.STRUCT, record)
contentStruct = (('Parts Index', '%di' % record['NumParts'], '<'),
('Vertices', '%dd' % (2 * record['NumPoints']), '<'))
content = {}
content['Parts Index'] = record['Parts Index']
verts = []
[verts.extend(vert) for vert in record['Vertices']]
content['Vertices'] = verts
content = _packDict(contentStruct, content)
return rheader + content
class PolyLineZ(object):
HASZ = True
HASM = True
String_Type = 'ARCZ'
STRUCT = (('Shape Type', 'i', '<'),
('BBOX Xmin', 'd', '<'),
('BBOX Ymin', 'd', '<'),
('BBOX Xmax', 'd', '<'),
('BBOX Ymax', 'd', '<'),
('NumParts', 'i', '<'),
('NumPoints', 'i', '<'))
USTRUCT = [{'fmt': 'iddddii', 'order': '<', 'names': ['Shape Type', 'BBOX Xmin', 'BBOX Ymin', 'BBOX Xmax', 'BBOX Ymax', 'NumParts', 'NumPoints'], 'size': 44}]
@classmethod
def unpack(cls, dat):
record = _unpackDict(cls.USTRUCT, dat)
contentStruct = (('Parts Index', ('i', record['NumParts']), '<'),
('Vertices', ('d', 2 * record['NumPoints']), '<'),
('Zmin', ('d', 1), '<'),
('Zmax', ('d', 1), '<'),
('Zarray', ('d', record['NumPoints']), '<'),
('Mmin', ('d', 1), '<'),
('Mmax', ('d', 1), '<'),
('Marray', ('d', record['NumPoints']), '<'),)
_unpackDict2(record, contentStruct, dat)
verts = record['Vertices']
record['Vertices'] = list(izip(
islice(verts, 0, None, 2), islice(verts, 1, None, 2)))
if not record['Parts Index']:
record['Parts Index'] = [0]
record['Zmin'] = record['Zmin'][0]
record['Zmax'] = record['Zmax'][0]
record['Mmin'] = record['Mmin'][0]
record['Mmax'] = record['Mmax'][0]
return record
@classmethod
def pack(cls, record):
rheader = _packDict(cls.STRUCT, record)
contentStruct = (('Parts Index', '%di' % record['NumParts'], '<'),
('Vertices', '%dd' % (2 * record['NumPoints']), '<'),
('Zmin', 'd', '<'),
('Zmax', 'd', '<'),
('Zarray', '%dd' % (record['NumPoints']), '<'),
('Mmin', 'd', '<'),
('Mmax', 'd', '<'),
('Marray', '%dd' % (record['NumPoints']), '<'))
content = {}
content.update(record)
content['Parts Index'] = record['Parts Index']
verts = []
[verts.extend(vert) for vert in record['Vertices']]
content['Vertices'] = verts
content = _packDict(contentStruct, content)
return rheader + content
class Polygon(PolyLine):
""" Packs and Unpacks a ShapeFile Polygon Type
Identical to PolyLine.
Example:
>>> import pysal
>>> shp = shp_file(pysal.examples.get_path('Polygon.shp'))
>>> rec = shp.get_shape(1)
>>> rec == {'BBOX Ymax': -0.3126531125455273, 'NumPoints': 7, 'BBOX Ymin': -0.35957259110238166, 'NumParts': 1, 'Vertices': [(0.05396439570183631, -0.3126531125455273), (0.051473095955454629, -0.35251390848763364), (0.059777428443393454, -0.34254870950210703), (0.063099161438568974, -0.34462479262409174), (0.048981796209073003, -0.35957259110238166), (0.046905713087088297, -0.3126531125455273), (0.05396439570183631, -0.3126531125455273)], 'BBOX Xmax': 0.063099161438568974, 'BBOX Xmin': 0.046905713087088297, 'Shape Type': 5, 'Parts Index': [0]}
True
>>> pos = shp.fileObj.seek(shp._shx.index[1][0]+8) #+8 byte record header
>>> dat = shp.fileObj.read(shp._shx.index[1][1])
>>> dat == Polygon.pack(rec)
True
"""
String_Type = 'POLYGON'
class MultiPoint:
def __init__(self):
raise NotImplementedError("No MultiPoint Support at this time.")
class PolygonZ(PolyLineZ):
String_Type = 'POLYGONZ'
class MultiPointZ:
def __init__(self):
raise NotImplementedError("No MultiPointZ Support at this time.")
class PointM:
def __init__(self):
raise NotImplementedError("No PointM Support at this time.")
class PolyLineM:
def __init__(self):
raise NotImplementedError("No PolyLineM Support at this time.")
class PolygonM:
def __init__(self):
raise NotImplementedError("No PolygonM Support at this time.")
class MultiPointM:
def __init__(self):
raise NotImplementedError("No MultiPointM Support at this time.")
class MultiPatch:
def __init__(self):
raise NotImplementedError("No MultiPatch Support at this time.")
TYPE_DISPATCH = {0: NullShape, 1: Point, 3: PolyLine, 5: Polygon, 8: MultiPoint, 11: PointZ, 13: PolyLineZ, 15: PolygonZ, 18: MultiPointZ, 21: PointM, 23: PolyLineM, 25: PolygonM, 28: MultiPointM, 31: MultiPatch, 'POINT': Point, 'POINTZ': PointZ, 'POINTM': PointM, 'ARC': PolyLine, 'ARCZ': PolyLineZ, 'ARCM': PolyLineM, 'POLYGON': Polygon, 'POLYGONZ': PolygonZ, 'POLYGONM': PolygonM, 'MULTIPOINT': MultiPoint, 'MULTIPOINTZ': MultiPointZ, 'MULTIPOINTM': MultiPointM, 'MULTIPATCH': MultiPatch}
|
mdmdmdmdmd/plugin.video.youtube
|
refs/heads/master
|
resources/lib/youtube/helper/yt_specials.py
|
8
|
__author__ = 'bromix'
from resources.lib import kodion
from resources.lib.kodion.items import DirectoryItem, UriItem
from resources.lib.youtube.helper import v3, tv, extract_urls, UrlResolver, UrlToItemConverter
from . import utils
def _process_related_videos(provider, context, re_match):
result = []
provider.set_content_type(context, kodion.constants.content_type.VIDEOS)
page_token = context.get_param('page_token', '')
video_id = context.get_param('video_id', '')
if video_id:
json_data = provider.get_client(context).get_related_videos(video_id=video_id, page_token=page_token)
if not v3.handle_error(provider, context, json_data):
return False
result.extend(v3.response_to_items(provider, context, json_data, process_next_page=False))
pass
return result
def _process_recommendations(provider, context, re_match):
provider.set_content_type(context, kodion.constants.content_type.VIDEOS)
result = []
page_token = context.get_param('page_token', '')
json_data = provider.get_client(context).get_activities('home', page_token=page_token)
if not v3.handle_error(provider, context, json_data):
return False
result.extend(v3.response_to_items(provider, context, json_data))
return result
def _process_popular_right_now(provider, context, re_match):
provider.set_content_type(context, kodion.constants.content_type.VIDEOS)
result = []
page_token = context.get_param('page_token', '')
json_data = provider.get_client(context).get_popular_videos(page_token=page_token)
if not v3.handle_error(provider, context, json_data):
return False
result.extend(v3.response_to_items(provider, context, json_data))
return result
def _process_browse_channels(provider, context, re_match):
result = []
page_token = context.get_param('page_token', '')
guide_id = context.get_param('guide_id', '')
if guide_id:
json_data = provider.get_client(context).get_guide_category(guide_id)
if not v3.handle_error(provider, context, json_data):
return False
result.extend(v3.response_to_items(provider, context, json_data))
pass
else:
json_data = provider.get_client(context).get_guide_categories()
if not v3.handle_error(provider, context, json_data):
return False
result.extend(v3.response_to_items(provider, context, json_data))
pass
return result
def _process_disliked_videos(provider, context, re_match):
provider.set_content_type(context, kodion.constants.content_type.VIDEOS)
result = []
page_token = context.get_param('page_token', '')
json_data = provider.get_client(context).get_disliked_videos(page_token=page_token)
if not v3.handle_error(provider, context, json_data):
return False
result.extend(v3.response_to_items(provider, context, json_data))
return result
def _process_live_events(provider, context, re_match):
def _sort(x):
return x.get_aired()
provider.set_content_type(context, kodion.constants.content_type.VIDEOS)
result = []
# TODO: cache result
page_token = context.get_param('page_token', '')
json_data = provider.get_client(context).get_live_events(event_type='live', page_token=page_token)
if not v3.handle_error(provider, context, json_data):
return False
result.extend(v3.response_to_items(provider, context, json_data, sort=_sort, reverse_sort=True))
return result
def _process_description_links(provider, context, re_match):
def _extract_urls(_video_id):
provider.set_content_type(context, kodion.constants.content_type.VIDEOS)
result = []
progress_dialog = context.get_ui().create_progress_dialog(
heading=context.localize(kodion.constants.localize.COMMON_PLEASE_WAIT), background=False)
resource_manager = provider.get_resource_manager(context)
video_data = resource_manager.get_videos([_video_id])
yt_item = video_data[_video_id]
snippet = yt_item['snippet'] # raises KeyError if the item does not conform
description = kodion.utils.strip_html_from_text(snippet['description'])
urls = extract_urls(description)
progress_dialog.set_total(len(urls))
url_resolver = UrlResolver(context)
res_urls = []
for url in urls:
context.log_debug('Resolving url "%s"' % url)
progress_dialog.update(steps=1, text=url)
resolved_url = url_resolver.resolve(url)
context.log_debug('Resolved url "%s"' % resolved_url)
res_urls.append(resolved_url)
if progress_dialog.is_aborted():
context.log_debug('Resolving urls aborted')
break
context.sleep(50)
pass
url_to_item_converter = UrlToItemConverter()
url_to_item_converter.add_urls(res_urls, provider, context)
result.extend(url_to_item_converter.get_items(provider, context))
progress_dialog.close()
if len(result) == 0:
progress_dialog.close()
context.get_ui().on_ok(title=context.localize(provider.LOCAL_MAP['youtube.video.description.links']),
text=context.localize(
provider.LOCAL_MAP['youtube.video.description.links.not_found']))
return False
return result
def _display_channels(_channel_ids):
_channel_id_dict = {}
for channel_id in _channel_ids:
channel_item = DirectoryItem('', context.create_uri(['channel', channel_id]))
channel_item.set_fanart(provider.get_fanart(context))
_channel_id_dict[channel_id] = channel_item
pass
_channel_item_dict = {}
utils.update_channel_infos(provider, context, _channel_id_dict, channel_items_dict=_channel_item_dict)
utils.update_fanarts(provider, context, _channel_item_dict)
# clean up - remove empty entries
_result = []
for key in _channel_id_dict:
_channel_item = _channel_id_dict[key]
if _channel_item.get_name():
_result.append(_channel_item)
pass
pass
return _result
def _display_playlists(_playlist_ids):
_playlist_id_dict = {}
for playlist_id in _playlist_ids:
playlist_item = DirectoryItem('', context.create_uri(['playlist', playlist_id]))
playlist_item.set_fanart(provider.get_fanart(context))
_playlist_id_dict[playlist_id] = playlist_item
pass
_channel_item_dict = {}
utils.update_playlist_infos(provider, context, _playlist_id_dict, _channel_item_dict)
utils.update_fanarts(provider, context, _channel_item_dict)
# clean up - remove empty entries
_result = []
for key in _playlist_id_dict:
_playlist_item = _playlist_id_dict[key]
if _playlist_item.get_name():
_result.append(_playlist_item)
pass
return _result
video_id = context.get_param('video_id', '')
if video_id:
return _extract_urls(video_id)
channel_ids = context.get_param('channel_ids', '')
if channel_ids:
channel_ids = channel_ids.split(',')
if len(channel_ids) > 0:
return _display_channels(channel_ids)
pass
playlist_ids = context.get_param('playlist_ids', '')
if playlist_ids:
playlist_ids = playlist_ids.split(',')
if len(playlist_ids) > 0:
return _display_playlists(playlist_ids)
pass
context.log_error('Missing video_id or playlist_ids for description links')
return False
def _process_new_uploaded_videos_tv(provider, context, re_match):
provider.set_content_type(context, kodion.constants.content_type.VIDEOS)
result = []
next_page_token = context.get_param('next_page_token', '')
offset = int(context.get_param('offset', 0))
json_data = provider.get_client(context).get_my_subscriptions(page_token=next_page_token, offset=offset)
result.extend(tv.my_subscriptions_to_items(provider, context, json_data))
return result
def process(category, provider, context, re_match):
result = []
# we need a login
client = provider.get_client(context)
if not provider.is_logged_in() and category in ['new_uploaded_videos_tv', 'disliked_videos']:
return UriItem(context.create_uri(['sign', 'in']))
if category == 'related_videos':
return _process_related_videos(provider, context, re_match)
elif category == 'popular_right_now':
return _process_popular_right_now(provider, context, re_match)
elif category == 'recommendations':
return _process_recommendations(provider, context, re_match)
elif category == 'browse_channels':
return _process_browse_channels(provider, context, re_match)
elif category == 'new_uploaded_videos_tv':
return _process_new_uploaded_videos_tv(provider, context, re_match)
elif category == 'disliked_videos':
return _process_disliked_videos(provider, context, re_match)
elif category == 'live':
return _process_live_events(provider, context, re_match)
elif category == 'description_links':
return _process_description_links(provider, context, re_match)
else:
raise kodion.KodionException("YouTube special category '%s' not found" % category)
|
ToonBoxEntertainment/rez
|
refs/heads/master
|
src/rezgui/util.py
|
7
|
from rezgui.qt import QtGui
from rez.utils.formatting import readable_time_duration
import os.path
import time
def create_pane(widgets, horizontal, parent_widget=None, compact=False,
compact_spacing=2):
"""Create a widget containing an aligned set of widgets.
Args:
widgets (list): Each entry may be a `QWidget`, a (widget, stretch) tuple,
an int (a fixed spacing in pixels), or a falsy value (adds a stretch).
horizontal (bool): Lay the widgets out horizontally if True, vertically
otherwise.
compact (bool): Use tighter spacing and margins if True.
parent_widget (`QWidget`): Owner widget; a QWidget is created if this
is not provided.
Returns:
`QWidget`
"""
pane = parent_widget or QtGui.QWidget()
type_ = QtGui.QHBoxLayout if horizontal else QtGui.QVBoxLayout
layout = type_()
if compact:
layout.setSpacing(compact_spacing)
layout.setContentsMargins(compact_spacing, compact_spacing,
compact_spacing, compact_spacing)
for widget in widgets:
stretch = 0
if isinstance(widget, tuple):
widget, stretch = widget
if isinstance(widget, int):
layout.addSpacing(widget)
elif widget:
layout.addWidget(widget, stretch)
else:
layout.addStretch()
pane.setLayout(layout)
return pane
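# Hedged usage sketch (button labels are illustrative): two buttons laid out
# horizontally with a 10px gap between them and a trailing stretch (the None
# entry), using compact spacing.
# pane = create_pane([QtGui.QPushButton("Ok"), 10, QtGui.QPushButton("Cancel"), None],
#                    horizontal=True, compact=True)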
icons = {}
def get_icon(name, as_qicon=False):
"""Returns a `QPixmap` containing the given image, or a QIcon if `as_qicon`
is True"""
filename = name + ".png"
icon = icons.get(filename)
if not icon:
path = os.path.dirname(__file__)
path = os.path.join(path, "icons")
filepath = os.path.join(path, filename)
if not os.path.exists(filepath):
filepath = os.path.join(path, "pink.png")
icon = QtGui.QPixmap(filepath)
icons[filename] = icon
return QtGui.QIcon(icon) if as_qicon else icon
def get_icon_widget(filename, tooltip=None):
icon = get_icon(filename)
icon_label = QtGui.QLabel()
icon_label.setPixmap(icon)
if tooltip:
icon_label.setToolTip(tooltip)
return icon_label
def get_timestamp_str(timestamp):
now = int(time.time())
release_time = time.localtime(timestamp)
release_time_str = time.strftime('%d %b %Y %H:%M:%S', release_time)
ago = readable_time_duration(now - timestamp)
return "%s (%s ago)" % (release_time_str, ago)
def add_menu_action(menu, label, slot=None, icon_name=None, group=None,
parent=None):
nargs = []
if icon_name:
icon = get_icon(icon_name, as_qicon=True)
nargs.append(icon)
nargs += [label, menu]
if parent:
nargs.append(parent)
action = QtGui.QAction(*nargs)
if slot:
action.triggered.connect(slot)
if group:
action.setCheckable(True)
group.addAction(action)
menu.addAction(action)
return action
def interp_color(a, b, f):
"""Interpolate between two colors.
Returns:
`QColor` object.
"""
a_ = (a.redF(), a.greenF(), a.blueF())
b_ = (b.redF(), b.greenF(), b.blueF())
a_ = [x * (1 - f) for x in a_]
b_ = [x * f for x in b_]
c = [x + y for x, y in zip(a_, b_)]
return QtGui.QColor.fromRgbF(*c)
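# Example (illustrative values): halfway between pure red and pure blue gives
# a colour with redF() == blueF() == 0.5.
# c = interp_color(QtGui.QColor("red"), QtGui.QColor("blue"), 0.5)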
def create_toolbutton(entries, parent=None):
"""Create a toolbutton.
Args:
entries: List of (label, slot) tuples.
Returns:
A (`QtGui.QToolButton`, list of `QtGui.QAction`) tuple.
"""
btn = QtGui.QToolButton(parent)
menu = QtGui.QMenu()
actions = []
for label, slot in entries:
action = add_menu_action(menu, label, slot)
actions.append(action)
btn.setPopupMode(QtGui.QToolButton.MenuButtonPopup)
btn.setDefaultAction(actions[0])
btn.setMenu(menu)
return btn, actions
def update_font(widget, italic=None, bold=None, underline=None):
font = widget.font()
if italic is not None:
font.setItalic(italic)
if bold is not None:
font.setBold(bold)
if underline is not None:
font.setUnderline(underline)
widget.setFont(font)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
google-research/google-research
|
refs/heads/master
|
bigg/bigg/model/tree_clib/tree_lib.py
|
1
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import numpy as np
import random
import os
import sys
import networkx as nx
from tqdm import tqdm
# pylint: skip-file
try:
import torch
except:
print('no torch loaded')
class CtypeGraph(object):
def __init__(self, g):
self.num_nodes = len(g)
self.num_edges = len(g.edges())
self.edge_pairs = np.zeros((self.num_edges * 2, ), dtype=np.int32)
for i, (x, y) in enumerate(g.edges()):
self.edge_pairs[i * 2] = x
self.edge_pairs[i * 2 + 1] = y
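# Illustrative sketch: a 3-node path graph is flattened into a contiguous
# int32 buffer of endpoint pairs, ready to hand to the C++ library.
# g = CtypeGraph(nx.path_graph(3))
# g.num_nodes, g.num_edges   # -> (3, 2)
# list(g.edge_pairs)         # -> [0, 1, 1, 2]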
class _tree_lib(object):
def __init__(self):
pass
def setup(self, config):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.lib = ctypes.CDLL('%s/build/dll/libtree.so' % dir_path)
self.lib.Init.restype = ctypes.c_int
self.lib.PrepareTrain.restype = ctypes.c_int
self.lib.AddGraph.restype = ctypes.c_int
self.lib.TotalTreeNodes.restype = ctypes.c_int
self.lib.MaxTreeDepth.restype = ctypes.c_int
self.lib.NumPrevDep.restype = ctypes.c_int
self.lib.NumBottomDep.restype = ctypes.c_int
self.lib.NumRowBottomDep.restype = ctypes.c_int
self.lib.NumRowPastDep.restype = ctypes.c_int
self.lib.NumRowTopDep.restype = ctypes.c_int
self.lib.RowSumSteps.restype = ctypes.c_int
self.lib.RowMergeSteps.restype = ctypes.c_int
self.lib.NumRowSumOut.restype = ctypes.c_int
self.lib.NumRowSumNext.restype = ctypes.c_int
self.lib.NumCurNodes.restype = ctypes.c_int
self.lib.NumInternalNodes.restype = ctypes.c_int
self.lib.NumLeftBot.restype = ctypes.c_int
self.lib.GetNumNextStates.restype = ctypes.c_int
args = 'this -bits_compress %d -embed_dim %d -gpu %d -bfs_permute %d -seed %d' % (config.bits_compress, config.embed_dim, config.gpu, config.bfs_permute, config.seed)
args = args.split()
if sys.version_info[0] > 2:
args = [arg.encode() for arg in args] # str -> bytes for each element in args
arr = (ctypes.c_char_p * len(args))()
arr[:] = args
self.lib.Init(len(args), arr)
self.embed_dim = config.embed_dim
self.device = config.device
self.num_graphs = 0
self.graph_stats = []
def TotalTreeNodes(self):
return self.lib.TotalTreeNodes()
def InsertGraph(self, nx_g, bipart_stats=None):
gid = self.num_graphs
self.num_graphs += 1
if isinstance(nx_g, CtypeGraph):
ctype_g = nx_g
else:
ctype_g = CtypeGraph(nx_g)
self.graph_stats.append((ctype_g.num_nodes, ctype_g.num_edges))
if bipart_stats is None:
n, m = -1, -1
else:
n, m = bipart_stats
self.lib.AddGraph(gid, ctype_g.num_nodes, ctype_g.num_edges,
ctypes.c_void_p(ctype_g.edge_pairs.ctypes.data), n, m)
return gid
def PrepareMiniBatch(self, list_gids, list_node_start=None, num_nodes=-1, list_col_ranges=None, new_batch=True):
n_graphs = len(list_gids)
list_gids = np.array(list_gids, dtype=np.int32)
if list_node_start is None:
list_node_start = np.zeros((n_graphs,), dtype=np.int32)
else:
list_node_start = np.array(list_node_start, dtype=np.int32)
if list_col_ranges is None:
list_col_start = np.zeros((n_graphs,), dtype=np.int32) - 1
list_col_end = np.zeros((n_graphs,), dtype=np.int32) - 1
else:
list_col_start, list_col_end = zip(*list_col_ranges)
list_col_start = np.array(list_col_start, dtype=np.int32)
list_col_end = np.array(list_col_end, dtype=np.int32)
self.lib.PrepareTrain(n_graphs,
ctypes.c_void_p(list_gids.ctypes.data),
ctypes.c_void_p(list_node_start.ctypes.data),
ctypes.c_void_p(list_col_start.ctypes.data),
ctypes.c_void_p(list_col_end.ctypes.data),
num_nodes,
int(new_batch))
list_nnodes = []
for i, gid in enumerate(list_gids):
tot_nodes = self.graph_stats[gid][0]
if num_nodes <= 0:
cur_num = tot_nodes - list_node_start[i]
else:
cur_num = min(num_nodes, tot_nodes - list_node_start[i])
list_nnodes.append(cur_num)
self.list_nnodes = list_nnodes
return list_nnodes
def PrepareTreeEmbed(self):
max_d = self.lib.MaxTreeDepth()
all_ids = []
for d in range(max_d + 1):
ids_d = []
for i in range(2):
num_prev = self.lib.NumPrevDep(d, i)
num_bot = self.lib.NumBottomDep(d, i)
bot_froms = np.empty((num_bot,), dtype=np.int32)
bot_tos = np.empty((num_bot,), dtype=np.int32)
prev_froms = np.empty((num_prev,), dtype=np.int32)
prev_tos = np.empty((num_prev,), dtype=np.int32)
self.lib.SetTreeEmbedIds(d,
i,
ctypes.c_void_p(bot_froms.ctypes.data),
ctypes.c_void_p(bot_tos.ctypes.data),
ctypes.c_void_p(prev_froms.ctypes.data),
ctypes.c_void_p(prev_tos.ctypes.data))
ids_d.append((bot_froms, bot_tos, prev_froms, prev_tos))
all_ids.append(ids_d)
return all_ids
def PrepareBinary(self):
max_d = self.lib.MaxBinFeatDepth()
all_bin_feats = []
base_feat = torch.zeros(2, self.embed_dim)
base_feat[0, 0] = -1
base_feat[1, 0] = 1
base_feat = base_feat.to(self.device)
for d in range(max_d):
num_nodes = self.lib.NumBinNodes(d)
if num_nodes == 0:
all_bin_feats.append(base_feat)
else:
if self.device == torch.device('cpu'):
feat = torch.zeros(num_nodes + 2, self.embed_dim)
dev = 0
else:
feat = torch.cuda.FloatTensor(num_nodes + 2, self.embed_dim).fill_(0)
dev = 1
self.lib.SetBinaryFeat(d, ctypes.c_void_p(feat.data_ptr()), dev)
all_bin_feats.append(feat)
return all_bin_feats, (base_feat, base_feat)
def PrepareRowEmbed(self):
tot_levels = self.lib.RowMergeSteps()
lv = 0
all_ids = []
for lv in range(tot_levels):
ids_d = []
for i in range(2):
num_prev = self.lib.NumRowTopDep(lv, i)
num_bot = self.lib.NumRowBottomDep(i) if lv == 0 else 0
num_past = self.lib.NumRowPastDep(lv, i)
bot_froms = np.empty((num_bot,), dtype=np.int32)
bot_tos = np.empty((num_bot,), dtype=np.int32)
prev_froms = np.empty((num_prev,), dtype=np.int32)
prev_tos = np.empty((num_prev,), dtype=np.int32)
past_froms = np.empty((num_past,), dtype=np.int32)
past_tos = np.empty((num_past,), dtype=np.int32)
self.lib.SetRowEmbedIds(i,
lv,
ctypes.c_void_p(bot_froms.ctypes.data),
ctypes.c_void_p(bot_tos.ctypes.data),
ctypes.c_void_p(prev_froms.ctypes.data),
ctypes.c_void_p(prev_tos.ctypes.data),
ctypes.c_void_p(past_froms.ctypes.data),
ctypes.c_void_p(past_tos.ctypes.data))
ids_d.append((bot_froms, bot_tos, prev_froms, prev_tos, past_froms, past_tos))
all_ids.append(ids_d)
return all_ids
def PrepareRowSummary(self):
total_steps = self.lib.RowSumSteps()
all_ids = []
total_nodes = np.sum(self.list_nnodes)
init_ids = np.empty((total_nodes,), dtype=np.int32)
self.lib.SetRowSumInit(ctypes.c_void_p(init_ids.ctypes.data))
for i in range(total_steps):
num_done = self.lib.NumRowSumOut(i)
num_next = self.lib.NumRowSumNext(i)
step_from = np.empty((num_done,), dtype=np.int32)
step_to = np.empty((num_done,), dtype=np.int32)
step_next = np.empty((num_next,), dtype=np.int32)
step_input = np.empty((num_next,), dtype=np.int32)
self.lib.SetRowSumIds(i,
ctypes.c_void_p(step_from.ctypes.data),
ctypes.c_void_p(step_to.ctypes.data),
ctypes.c_void_p(step_input.ctypes.data),
ctypes.c_void_p(step_next.ctypes.data))
all_ids.append((step_from, step_to, step_next, step_input))
total_nodes -= num_done
last_ids = np.empty((total_nodes,), dtype=np.int32)
self.lib.SetRowSumLast(ctypes.c_void_p(last_ids.ctypes.data))
num_next = self.lib.GetNumNextStates()
next_ids = np.empty((num_next,), dtype=np.int32)
self.lib.GetNextStates(ctypes.c_void_p(next_ids.ctypes.data))
np_pos = np.empty((np.sum(self.list_nnodes),), dtype=np.int32)
self.lib.GetCurPos(ctypes.c_void_p(np_pos.ctypes.data))
return init_ids, all_ids, last_ids, next_ids, torch.tensor(np_pos, dtype=torch.float32).to(self.device)
def GetChLabel(self, lr, depth=-1, dtype=None):
if lr == 0:
total_nodes = np.sum(self.list_nnodes)
has_ch = np.empty((total_nodes,), dtype=np.int32)
self.lib.HasChild(ctypes.c_void_p(has_ch.ctypes.data))
num_ch = None
else:
n = self.lib.NumInternalNodes(depth)
has_ch = np.empty((n,), dtype=np.int32)
self.lib.GetChMask(lr, depth,
ctypes.c_void_p(has_ch.ctypes.data))
num_ch = np.empty((n,), dtype=np.int32)
self.lib.GetNumCh(lr, depth,
ctypes.c_void_p(num_ch.ctypes.data))
num_ch = torch.tensor(num_ch, dtype=torch.float32).to(self.device)
if dtype is not None:
has_ch = has_ch.astype(dtype)
return has_ch, num_ch
def QueryNonLeaf(self, depth):
n = self.lib.NumCurNodes(depth)
if n == 0:
return None
is_internal = np.empty((n,), dtype=np.int32)
self.lib.GetInternalMask(depth, ctypes.c_void_p(is_internal.ctypes.data))
return is_internal.astype(np.bool)
def GetLeftRootStates(self, depth):
n = self.lib.NumInternalNodes(depth)
left_bot = self.lib.NumLeftBot(depth)
left_next = n - left_bot
bot_froms = np.empty((left_bot,), dtype=np.int32)
bot_tos = np.empty((left_bot,), dtype=np.int32)
next_froms = np.empty((left_next,), dtype=np.int32)
next_tos = np.empty((left_next,), dtype=np.int32)
self.lib.SetLeftState(depth,
ctypes.c_void_p(bot_froms.ctypes.data),
ctypes.c_void_p(bot_tos.ctypes.data),
ctypes.c_void_p(next_froms.ctypes.data),
ctypes.c_void_p(next_tos.ctypes.data))
if left_bot == 0:
bot_froms = bot_tos = None
if left_next == 0:
next_froms = next_tos = None
return bot_froms, bot_tos, next_froms, next_tos
def GetLeftRightSelect(self, depth, num_left, num_right):
left_froms = np.empty((num_left,), dtype=np.int32)
left_tos = np.empty((num_left,), dtype=np.int32)
right_froms = np.empty((num_right,), dtype=np.int32)
right_tos = np.empty((num_right,), dtype=np.int32)
self.lib.LeftRightSelect(depth,
ctypes.c_void_p(left_froms.ctypes.data),
ctypes.c_void_p(left_tos.ctypes.data),
ctypes.c_void_p(right_froms.ctypes.data),
ctypes.c_void_p(right_tos.ctypes.data))
return left_froms, left_tos, right_froms, right_tos
TreeLib = _tree_lib()
def setup_treelib(config):
global TreeLib
dll_path = '%s/build/dll/libtree.so' % os.path.dirname(os.path.realpath(__file__))
if os.path.exists(dll_path):
TreeLib.setup(config)
|
patrickwind/My_Blog
|
refs/heads/master
|
venv/lib/python2.7/site-packages/setuptools/py27compat.py
|
958
|
"""
Compatibility Support for Python 2.7 and earlier
"""
import sys
def get_all_headers(message, key):
"""
Given an HTTPMessage, return all headers matching a given key.
"""
return message.get_all(key)
if sys.version_info < (3,):
def get_all_headers(message, key):
return message.getheaders(key)
|
fldc/CouchPotatoServer
|
refs/heads/custom
|
libs/unrar2/rar_exceptions.py
|
153
|
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Low level interface - see UnRARDLL\UNRARDLL.TXT
class ArchiveHeaderBroken(Exception): pass
class InvalidRARArchive(Exception): pass
class FileOpenError(Exception): pass
class IncorrectRARPassword(Exception): pass
class InvalidRARArchiveUsage(Exception): pass
|
fhaoquan/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Doc/includes/mp_workers.py
|
52
|
import time
import random
from multiprocessing import Process, Queue, current_process, freeze_support
#
# Function run by worker processes
#
def worker(input, output):
for func, args in iter(input.get, 'STOP'):
result = calculate(func, args)
output.put(result)
#
# Function used to calculate result
#
def calculate(func, args):
result = func(*args)
return '%s says that %s%s = %s' % \
(current_process().name, func.__name__, args, result)
#
# Functions referenced by tasks
#
def mul(a, b):
time.sleep(0.5*random.random())
return a * b
def plus(a, b):
time.sleep(0.5*random.random())
return a + b
#
#
#
def test():
NUMBER_OF_PROCESSES = 4
TASKS1 = [(mul, (i, 7)) for i in range(20)]
TASKS2 = [(plus, (i, 8)) for i in range(10)]
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for task in TASKS1:
task_queue.put(task)
# Start worker processes
for i in range(NUMBER_OF_PROCESSES):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
print('Unordered results:')
for i in range(len(TASKS1)):
print('\t', done_queue.get())
# Add more tasks using `put()`
for task in TASKS2:
task_queue.put(task)
# Get and print some more results
for i in range(len(TASKS2)):
print('\t', done_queue.get())
# Tell child processes to stop
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
if __name__ == '__main__':
freeze_support()
test()
|
bobsilverberg/Marketplace.Python
|
refs/heads/master
|
marketplace/connection.py
|
2
|
""" Provide connection with Marketplace API
"""
import json
import logging
import time
import urllib
import oauth2 as oauth
import requests
log = logging.getLogger('marketplace.%s' % __name__)
class NotExpectedStatusCode(requests.exceptions.HTTPError):
""" Raise if status code returned from API is not the expected one
"""
pass
def _get_args(consumer):
"""Provide a dict with oauth data
"""
return dict(
oauth_consumer_key=consumer.key,
oauth_nonce=oauth.generate_nonce(),
oauth_signature_method='HMAC-SHA1',
oauth_timestamp=int(time.time()),
oauth_version='1.0')
class Connection:
""" Keeps the consumer class and provides the way to connect to the
Marketplace API
"""
signature_method = oauth.SignatureMethod_HMAC_SHA1()
consumer = None
def __init__(self, consumer_key, consumer_secret):
self.set_consumer(consumer_key, consumer_secret)
def set_consumer(self, consumer_key, consumer_secret):
"""Sets the consumer attribute
"""
self.consumer = oauth.Consumer(consumer_key, consumer_secret)
def prepare_request(self, method, url, body=''):
"""Adds consumer and signs the request
:returns: headers of the signed request
"""
req = oauth.Request(method=method, url=url,
parameters=_get_args(self.consumer))
req.sign_request(self.signature_method, self.consumer, None)
headers = req.to_header()
headers['Content-type'] = 'application/json'
if body:
if method == 'GET':
body = urllib.urlencode(body)
else:
body = json.dumps(body)
return {"headers": headers, "data": body}
@staticmethod
def _get_error_reason(response):
"""Extract error reason from the response. It might be either
the 'reason' or the entire response
"""
body = response.json
if body and 'reason' in body:
return body['reason']
return response.content
def fetch(self, method, url, data=None, expected_status_code=None):
"""Prepare the headers, encode data, call API and provide
data it returns
"""
kwargs = self.prepare_request(method, url, data)
response = getattr(requests, method.lower())(url, **kwargs)
log.debug(str(response.__dict__))
if response.status_code >= 400:
response.raise_for_status()
if (expected_status_code
and response.status_code != expected_status_code):
raise NotExpectedStatusCode(self._get_error_reason(response))
return response
def fetch_json(self, method, url, data=None, expected_status_code=None):
"""Return json decoded data from fetch
"""
return self.fetch(method, url, data, expected_status_code).json()
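# Hedged usage sketch (the key, secret and URL below are placeholders, not
# real Marketplace values):
# conn = Connection('my-consumer-key', 'my-consumer-secret')
# data = conn.fetch_json('GET', 'https://example.com/api/v1/apps/app/',
#                        expected_status_code=200)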
|
miipl-naveen/optibizz
|
refs/heads/master
|
addons/point_of_sale/point_of_sale.py
|
9
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from datetime import datetime
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools import float_is_zero
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
import openerp.addons.product.product
_logger = logging.getLogger(__name__)
class pos_config(osv.osv):
_name = 'pos.config'
POS_CONFIG_STATE = [
('active', 'Active'),
('inactive', 'Inactive'),
('deprecated', 'Deprecated')
]
def _get_currency(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, False)
for pos_config in self.browse(cr, uid, ids, context=context):
if pos_config.journal_id:
currency_id = pos_config.journal_id.currency.id or pos_config.journal_id.company_id.currency_id.id
else:
currency_id = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.currency_id.id
result[pos_config.id] = currency_id
return result
_columns = {
'name' : fields.char('Point of Sale Name', select=1,
required=True, help="An internal identification of the point of sale"),
'journal_ids' : fields.many2many('account.journal', 'pos_config_journal_rel',
'pos_config_id', 'journal_id', 'Available Payment Methods',
domain="[('journal_user', '=', True ), ('type', 'in', ['bank', 'cash'])]",),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'),
'stock_location_id': fields.many2one('stock.location', 'Stock Location', domain=[('usage', '=', 'internal')], required=True),
'journal_id' : fields.many2one('account.journal', 'Sale Journal',
domain=[('type', '=', 'sale')],
help="Accounting journal used to post sales entries."),
'currency_id' : fields.function(_get_currency, type="many2one", string="Currency", relation="res.currency"),
'iface_self_checkout' : fields.boolean('Self Checkout Mode', # FIXME : this field is obsolete
help="Check this if this point of sale should open by default in a self checkout mode. If unchecked, Odoo uses the normal cashier mode by default."),
'iface_cashdrawer' : fields.boolean('Cashdrawer', help="Automatically open the cashdrawer"),
'iface_payment_terminal' : fields.boolean('Payment Terminal', help="Enables Payment Terminal integration"),
'iface_electronic_scale' : fields.boolean('Electronic Scale', help="Enables Electronic Scale integration"),
'iface_vkeyboard' : fields.boolean('Virtual KeyBoard', help="Enables an integrated Virtual Keyboard"),
'iface_print_via_proxy' : fields.boolean('Print via Proxy', help="Bypass browser printing and prints via the hardware proxy"),
'iface_scan_via_proxy' : fields.boolean('Scan via Proxy', help="Enable barcode scanning with a remotely connected barcode scanner"),
'iface_invoicing': fields.boolean('Invoicing',help='Enables invoice generation from the Point of Sale'),
'iface_big_scrollbars': fields.boolean('Large Scrollbars',help='For imprecise industrial touchscreens'),
'receipt_header': fields.text('Receipt Header',help="A short text that will be inserted as a header in the printed receipt"),
'receipt_footer': fields.text('Receipt Footer',help="A short text that will be inserted as a footer in the printed receipt"),
'proxy_ip': fields.char('IP Address', help='The hostname or ip address of the hardware proxy, Will be autodetected if left empty', size=45),
'state' : fields.selection(POS_CONFIG_STATE, 'Status', required=True, readonly=True, copy=False),
'sequence_id' : fields.many2one('ir.sequence', 'Order IDs Sequence', readonly=True,
help="This sequence is automatically created by Odoo but you can change it "\
"to customize the reference numbers of your orders.", copy=False),
'session_ids': fields.one2many('pos.session', 'config_id', 'Sessions'),
'group_by' : fields.boolean('Group Journal Items', help="Check this if you want to group the Journal Items by Product while closing a Session"),
'pricelist_id': fields.many2one('product.pricelist','Pricelist', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'barcode_product': fields.char('Product Barcodes', size=64, help='The pattern that identifies product barcodes'),
'barcode_cashier': fields.char('Cashier Barcodes', size=64, help='The pattern that identifies cashier login barcodes'),
'barcode_customer': fields.char('Customer Barcodes',size=64, help='The pattern that identifies customer\'s client card barcodes'),
'barcode_price': fields.char('Price Barcodes', size=64, help='The pattern that identifies a product with a barcode encoded price'),
'barcode_weight': fields.char('Weight Barcodes', size=64, help='The pattern that identifies a product with a barcode encoded weight'),
'barcode_discount': fields.char('Discount Barcodes', size=64, help='The pattern that identifies a product with a barcode encoded discount'),
}
def _check_cash_control(self, cr, uid, ids, context=None):
return all(
(sum(int(journal.cash_control) for journal in record.journal_ids) <= 1)
for record in self.browse(cr, uid, ids, context=context)
)
def _check_company_location(self, cr, uid, ids, context=None):
for config in self.browse(cr, uid, ids, context=context):
if config.stock_location_id.company_id and config.stock_location_id.company_id.id != config.company_id.id:
return False
return True
def _check_company_journal(self, cr, uid, ids, context=None):
for config in self.browse(cr, uid, ids, context=context):
if config.journal_id and config.journal_id.company_id.id != config.company_id.id:
return False
return True
def _check_company_payment(self, cr, uid, ids, context=None):
for config in self.browse(cr, uid, ids, context=context):
journal_ids = [j.id for j in config.journal_ids]
if self.pool['account.journal'].search(cr, uid, [
('id', 'in', journal_ids),
('company_id', '!=', config.company_id.id)
], count=True, context=context):
return False
return True
_constraints = [
(_check_cash_control, "You cannot have two cash controls in one Point of Sale!", ['journal_ids']),
(_check_company_location, "The company of the stock location is different from that of the point of sale", ['company_id', 'stock_location_id']),
(_check_company_journal, "The company of the sale journal is different from that of the point of sale", ['company_id', 'journal_id']),
(_check_company_payment, "The company of a payment method is different from that of the point of sale", ['company_id', 'journal_ids']),
]
def name_get(self, cr, uid, ids, context=None):
result = []
states = {
'opening_control': _('Opening Control'),
'opened': _('In Progress'),
'closing_control': _('Closing Control'),
'closed': _('Closed & Posted'),
}
for record in self.browse(cr, uid, ids, context=context):
if (not record.session_ids) or (record.session_ids[0].state=='closed'):
result.append((record.id, record.name+' ('+_('not used')+')'))
continue
session = record.session_ids[0]
result.append((record.id, record.name + ' ('+session.user_id.name+')')) #, '+states[session.state]+')'))
return result
def _default_sale_journal(self, cr, uid, context=None):
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
res = self.pool.get('account.journal').search(cr, uid, [('type', '=', 'sale'), ('company_id', '=', company_id)], limit=1, context=context)
return res and res[0] or False
def _default_pricelist(self, cr, uid, context=None):
res = self.pool.get('product.pricelist').search(cr, uid, [('type', '=', 'sale')], limit=1, context=context)
return res and res[0] or False
def _get_default_location(self, cr, uid, context=None):
wh_obj = self.pool.get('stock.warehouse')
user = self.pool.get('res.users').browse(cr, uid, uid, context)
res = wh_obj.search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
if res and res[0]:
return wh_obj.browse(cr, uid, res[0], context=context).lot_stock_id.id
return False
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
return company_id
_defaults = {
'state' : POS_CONFIG_STATE[0][0],
'journal_id': _default_sale_journal,
'group_by' : True,
'pricelist_id': _default_pricelist,
'iface_invoicing': True,
'stock_location_id': _get_default_location,
'company_id': _get_default_company,
'barcode_product': '*',
'barcode_cashier': '041*',
'barcode_customer':'042*',
'barcode_weight': '21xxxxxNNDDD',
'barcode_discount':'22xxxxxxxxNN',
'barcode_price': '23xxxxxNNNDD',
}
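# Note on the default barcode patterns above (semantics as commonly documented for
# the POS barcode nomenclature; treat the details as an assumption if your version
# differs): 'x' matches any digit, 'N' digits encode the integer part of the value
# and 'D' digits its decimals, so '21xxxxxNNDDD' reads a weight of NN.DDD from the
# barcode, while '*' matches any remaining characters (e.g. all products for
# barcode_product).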
def onchange_picking_type_id(self, cr, uid, ids, picking_type_id, context=None):
p_type_obj = self.pool.get("stock.picking.type")
p_type = p_type_obj.browse(cr, uid, picking_type_id, context=context)
if p_type.default_location_src_id and p_type.default_location_src_id.usage == 'internal' and p_type.default_location_dest_id and p_type.default_location_dest_id.usage == 'customer':
return {'value': {'stock_location_id': p_type.default_location_src_id.id}}
return False
def set_active(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state' : 'active'}, context=context)
def set_inactive(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state' : 'inactive'}, context=context)
def set_deprecate(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state' : 'deprecated'}, context=context)
def create(self, cr, uid, values, context=None):
ir_sequence = self.pool.get('ir.sequence')
# force sequence_id field to new pos.order sequence
values['sequence_id'] = ir_sequence.create(cr, uid, {
'name': 'POS Order %s' % values['name'],
'padding': 4,
'prefix': "%s/" % values['name'],
'code': "pos.order",
'company_id': values.get('company_id', False),
}, context=context)
# TODO master: add field sequence_line_id on model
# this makes sure we always have one available per company
ir_sequence.create(cr, uid, {
'name': 'POS order line %s' % values['name'],
'padding': 4,
'prefix': "%s/" % values['name'],
'code': "pos.order.line",
'company_id': values.get('company_id', False),
}, context=context)
return super(pos_config, self).create(cr, uid, values, context=context)
def unlink(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
if obj.sequence_id:
obj.sequence_id.unlink()
return super(pos_config, self).unlink(cr, uid, ids, context=context)
class pos_session(osv.osv):
_name = 'pos.session'
_order = 'id desc'
POS_SESSION_STATE = [
('opening_control', 'Opening Control'), # Signal open
('opened', 'In Progress'), # Signal closing
('closing_control', 'Closing Control'), # Signal close
('closed', 'Closed & Posted'),
]
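# Functional getter for the cash_* fields below: a session has cash control as soon
# as one of its bank statements belongs to a journal with cash_control enabled, and
# that statement/journal pair is exposed as cash_register_id / cash_journal_id.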
def _compute_cash_all(self, cr, uid, ids, fieldnames, args, context=None):
result = dict()
for record in self.browse(cr, uid, ids, context=context):
result[record.id] = {
'cash_journal_id' : False,
'cash_register_id' : False,
'cash_control' : False,
}
for st in record.statement_ids:
if st.journal_id.cash_control == True:
result[record.id]['cash_control'] = True
result[record.id]['cash_journal_id'] = st.journal_id.id
result[record.id]['cash_register_id'] = st.id
return result
_columns = {
'config_id' : fields.many2one('pos.config', 'Point of Sale',
help="The physical point of sale you will use.",
required=True,
select=1,
domain="[('state', '=', 'active')]",
),
'name' : fields.char('Session ID', required=True, readonly=True),
'user_id' : fields.many2one('res.users', 'Responsible',
required=True,
select=1,
readonly=True,
states={'opening_control' : [('readonly', False)]}
),
'currency_id' : fields.related('config_id', 'currency_id', type="many2one", relation='res.currency', string="Currency"),
'start_at' : fields.datetime('Opening Date', readonly=True),
'stop_at' : fields.datetime('Closing Date', readonly=True),
'state' : fields.selection(POS_SESSION_STATE, 'Status',
required=True, readonly=True,
select=1, copy=False),
'sequence_number': fields.integer('Order Sequence Number', help='A sequence number that is incremented with each order'),
'login_number': fields.integer('Login Sequence Number', help='A sequence number that is incremented each time a user resumes the pos session'),
'cash_control' : fields.function(_compute_cash_all,
multi='cash',
type='boolean', string='Has Cash Control'),
'cash_journal_id' : fields.function(_compute_cash_all,
multi='cash',
type='many2one', relation='account.journal',
string='Cash Journal', store=True),
'cash_register_id' : fields.function(_compute_cash_all,
multi='cash',
type='many2one', relation='account.bank.statement',
string='Cash Register', store=True),
'opening_details_ids' : fields.related('cash_register_id', 'opening_details_ids',
type='one2many', relation='account.cashbox.line',
string='Opening Cash Control'),
'details_ids' : fields.related('cash_register_id', 'details_ids',
type='one2many', relation='account.cashbox.line',
string='Cash Control'),
'cash_register_balance_end_real' : fields.related('cash_register_id', 'balance_end_real',
type='float',
digits_compute=dp.get_precision('Account'),
string="Ending Balance",
help="Total of closing cash control lines.",
readonly=True),
'cash_register_balance_start' : fields.related('cash_register_id', 'balance_start',
type='float',
digits_compute=dp.get_precision('Account'),
string="Starting Balance",
help="Total of opening cash control lines.",
readonly=True),
'cash_register_total_entry_encoding' : fields.related('cash_register_id', 'total_entry_encoding',
string='Total Cash Transaction',
readonly=True,
help="Total of all paid sale orders"),
'cash_register_balance_end' : fields.related('cash_register_id', 'balance_end',
type='float',
digits_compute=dp.get_precision('Account'),
string="Theoretical Closing Balance",
help="Sum of opening balance and transactions.",
readonly=True),
'cash_register_difference' : fields.related('cash_register_id', 'difference',
type='float',
string='Difference',
help="Difference between the theoretical closing balance and the real closing balance.",
readonly=True),
'journal_ids' : fields.related('config_id', 'journal_ids',
type='many2many',
readonly=True,
relation='account.journal',
string='Available Payment Methods'),
'order_ids' : fields.one2many('pos.order', 'session_id', 'Orders'),
'statement_ids' : fields.one2many('account.bank.statement', 'pos_session_id', 'Bank Statement', readonly=True),
}
_defaults = {
'name' : '/',
'user_id' : lambda obj, cr, uid, context: uid,
'state' : 'opening_control',
'sequence_number': 1,
'login_number': 0,
}
_sql_constraints = [
('uniq_name', 'unique(name)', "The name of this POS Session must be unique!"),
]
def _check_unicity(self, cr, uid, ids, context=None):
for session in self.browse(cr, uid, ids, context=None):
# open if there is no session in 'opening_control', 'opened', 'closing_control' for one user
domain = [
('state', 'not in', ('closed','closing_control')),
('user_id', '=', session.user_id.id)
]
count = self.search_count(cr, uid, domain, context=context)
if count>1:
return False
return True
def _check_pos_config(self, cr, uid, ids, context=None):
for session in self.browse(cr, uid, ids, context=None):
domain = [
('state', '!=', 'closed'),
('config_id', '=', session.config_id.id)
]
count = self.search_count(cr, uid, domain, context=context)
if count>1:
return False
return True
_constraints = [
(_check_unicity, "You cannot create two active sessions with the same responsible!", ['user_id', 'state']),
(_check_pos_config, "You cannot create two active sessions related to the same point of sale!", ['config_id']),
]
def create(self, cr, uid, values, context=None):
context = dict(context or {})
config_id = values.get('config_id', False) or context.get('default_config_id', False)
if not config_id:
raise osv.except_osv( _('Error!'),
_("You should assign a Point of Sale to your session."))
# journal_id is not required on the pos_config because it does not
# exist at installation time. If nothing is configured at
# installation time we do the minimal configuration. Impossible to do in
# the .xml files as the CoA is not yet installed.
jobj = self.pool.get('pos.config')
pos_config = jobj.browse(cr, uid, config_id, context=context)
context.update({'company_id': pos_config.company_id.id})
if not pos_config.journal_id:
jid = jobj.default_get(cr, uid, ['journal_id'], context=context)['journal_id']
if jid:
jobj.write(cr, openerp.SUPERUSER_ID, [pos_config.id], {'journal_id': jid}, context=context)
else:
raise osv.except_osv(_('Error!'),
_("Unable to open the session. You have to assign a sale journal to your point of sale."))
# define some cash journal if no payment method exists
if not pos_config.journal_ids:
journal_proxy = self.pool.get('account.journal')
cashids = journal_proxy.search(cr, uid, [('journal_user', '=', True), ('type','=','cash')], context=context)
if not cashids:
cashids = journal_proxy.search(cr, uid, [('type', '=', 'cash')], context=context)
if not cashids:
cashids = journal_proxy.search(cr, uid, [('journal_user','=',True)], context=context)
journal_proxy.write(cr, openerp.SUPERUSER_ID, cashids, {'journal_user': True})
jobj.write(cr, openerp.SUPERUSER_ID, [pos_config.id], {'journal_ids': [(6,0, cashids)]})
pos_config = jobj.browse(cr, uid, config_id, context=context)
bank_statement_ids = []
for journal in pos_config.journal_ids:
bank_values = {
'journal_id' : journal.id,
'user_id' : uid,
'company_id' : pos_config.company_id.id
}
statement_id = self.pool.get('account.bank.statement').create(cr, uid, bank_values, context=context)
bank_statement_ids.append(statement_id)
values.update({
'name': self.pool['ir.sequence'].get(cr, uid, 'pos.session', context=context),
'statement_ids' : [(6, 0, bank_statement_ids)],
'config_id': config_id
})
return super(pos_session, self).create(cr, uid, values, context=context)
def unlink(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
for statement in obj.statement_ids:
statement.unlink(context=context)
return super(pos_session, self).unlink(cr, uid, ids, context=context)
def open_cb(self, cr, uid, ids, context=None):
"""
call the Point Of Sale interface and set the pos.session to 'opened' (in progress)
"""
if context is None:
context = dict()
if isinstance(ids, (int, long)):
ids = [ids]
this_record = self.browse(cr, uid, ids[0], context=context)
this_record.signal_workflow('open')
context.update(active_id=this_record.id)
return {
'type' : 'ir.actions.act_url',
'url' : '/pos/web/',
'target': 'self',
}
def login(self, cr, uid, ids, context=None):
this_record = self.browse(cr, uid, ids[0], context=context)
this_record.write({
'login_number': this_record.login_number+1,
})
def wkf_action_open(self, cr, uid, ids, context=None):
# second browse because we need to refetch the data from the DB for cash_register_id
for record in self.browse(cr, uid, ids, context=context):
values = {}
if not record.start_at:
values['start_at'] = time.strftime('%Y-%m-%d %H:%M:%S')
values['state'] = 'opened'
record.write(values)
for st in record.statement_ids:
st.button_open()
return self.open_frontend_cb(cr, uid, ids, context=context)
def wkf_action_opening_control(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state' : 'opening_control'}, context=context)
def wkf_action_closing_control(self, cr, uid, ids, context=None):
for session in self.browse(cr, uid, ids, context=context):
for statement in session.statement_ids:
if (statement != session.cash_register_id) and (statement.balance_end != statement.balance_end_real):
self.pool.get('account.bank.statement').write(cr, uid, [statement.id], {'balance_end_real': statement.balance_end})
return self.write(cr, uid, ids, {'state' : 'closing_control', 'stop_at' : time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
def wkf_action_close(self, cr, uid, ids, context=None):
# Close CashBox
for record in self.browse(cr, uid, ids, context=context):
for st in record.statement_ids:
if abs(st.difference) > st.journal_id.amount_authorized_diff:
# Only a POS manager may close a statement whose difference exceeds the authorized maximum.
if not self.pool.get('ir.model.access').check_groups(cr, uid, "point_of_sale.group_pos_manager"):
raise osv.except_osv( _('Error!'),
_("Your ending balance is too different from the theoretical cash closing (%.2f), the maximum allowed is: %.2f. You can contact your manager to force it.") % (st.difference, st.journal_id.amount_authorized_diff))
if (st.journal_id.type not in ['bank', 'cash']):
raise osv.except_osv(_('Error!'),
_("The type of the journal for your payment method should be bank or cash "))
getattr(st, 'button_confirm_%s' % st.journal_id.type)(context=context)
self._confirm_orders(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state' : 'closed'}, context=context)
obj = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'point_of_sale', 'menu_point_root')[1]
return {
'type' : 'ir.actions.client',
'name' : 'Point of Sale Menu',
'tag' : 'reload',
'params' : {'menu_id': obj},
}
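# Post the session's accounting: one account.move is created per session (in the
# company of the sale journal) for all 'paid' orders, then every remaining order is
# pushed to the 'done' workflow state; orders that are neither paid nor invoiced
# abort the closing with an error.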
def _confirm_orders(self, cr, uid, ids, context=None):
pos_order_obj = self.pool.get('pos.order')
for session in self.browse(cr, uid, ids, context=context):
company_id = session.config_id.journal_id.company_id.id
local_context = dict(context or {}, force_company=company_id)
order_ids = [order.id for order in session.order_ids if order.state == 'paid']
move_id = pos_order_obj._create_account_move(cr, uid, session.start_at, session.name, session.config_id.journal_id.id, company_id, context=context)
pos_order_obj._create_account_move_line(cr, uid, order_ids, session, move_id, context=local_context)
for order in session.order_ids:
if order.state == 'done':
continue
if order.state not in ('paid', 'invoiced'):
raise osv.except_osv(
_('Error!'),
_("You cannot confirm all orders of this session, because they have not the 'paid' status"))
else:
pos_order_obj.signal_workflow(cr, uid, [order.id], 'done')
return True
def open_frontend_cb(self, cr, uid, ids, context=None):
if not context:
context = {}
if not ids:
return {}
for session in self.browse(cr, uid, ids, context=context):
if session.user_id.id != uid:
raise osv.except_osv(
_('Error!'),
_("You cannot use the session of another users. This session is owned by %s. Please first close this one to use this point of sale." % session.user_id.name))
context.update({'active_id': ids[0]})
return {
'type' : 'ir.actions.act_url',
'target': 'self',
'url': '/pos/web/',
}
class pos_order(osv.osv):
_name = "pos.order"
_description = "Point of Sale"
_order = "id desc"
def _amount_line_tax(self, cr, uid, line, context=None):
account_tax_obj = self.pool['account.tax']
taxes_ids = [tax for tax in line.product_id.taxes_id if tax.company_id.id == line.order_id.company_id.id]
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = account_tax_obj.compute_all(cr, uid, taxes_ids, price, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)['taxes']
val = 0.0
for c in taxes:
val += c.get('amount', 0.0)
return val
def _order_fields(self, cr, uid, ui_order, context=None):
return {
'name': ui_order['name'],
'user_id': ui_order['user_id'] or False,
'session_id': ui_order['pos_session_id'],
'lines': ui_order['lines'],
'pos_reference':ui_order['name'],
'partner_id': ui_order['partner_id'] or False,
}
def _payment_fields(self, cr, uid, ui_paymentline, context=None):
return {
'amount': ui_paymentline['amount'] or 0.0,
'payment_date': ui_paymentline['name'],
'statement_id': ui_paymentline['statement_id'],
'payment_name': ui_paymentline.get('note',False),
'journal': ui_paymentline['journal_id'],
}
# This deals with orders that belong to a closed session. In order
# to recover from this we:
# - assign the order to another compatible open session
# - if that doesn't exist, create a new one
def _get_valid_session(self, cr, uid, order, context=None):
session = self.pool.get('pos.session')
closed_session = session.browse(cr, uid, order['pos_session_id'], context=context)
open_sessions = session.search(cr, uid, [('state', '=', 'opened'),
('config_id', '=', closed_session.config_id.id),
('user_id', '=', closed_session.user_id.id)],
limit=1, order="start_at DESC", context=context)
if open_sessions:
return open_sessions[0]
else:
new_session_id = session.create(cr, uid, {
'config_id': closed_session.config_id.id,
}, context=context)
new_session = session.browse(cr, uid, new_session_id, context=context)
# bypass opening_control (necessary when using cash control)
new_session.signal_workflow('open')
return new_session_id
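# Turn a single UI order (a dict built by the javascript client) into a pos.order:
# the order is re-attached to an open session if its original session is closing or
# closed, each ui payment line becomes a bank statement line, the session's
# sequence_number is bumped, and any change given back to the customer is recorded
# as a negative payment on a cash journal.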
def _process_order(self, cr, uid, order, context=None):
session = self.pool.get('pos.session').browse(cr, uid, order['pos_session_id'], context=context)
if session.state == 'closing_control' or session.state == 'closed':
session_id = self._get_valid_session(cr, uid, order, context=context)
session = self.pool.get('pos.session').browse(cr, uid, session_id, context=context)
order['pos_session_id'] = session_id
order_id = self.create(cr, uid, self._order_fields(cr, uid, order, context=context),context)
journal_ids = set()
for payments in order['statement_ids']:
self.add_payment(cr, uid, order_id, self._payment_fields(cr, uid, payments[2], context=context), context=context)
journal_ids.add(payments[2]['journal_id'])
if session.sequence_number <= order['sequence_number']:
session.write({'sequence_number': order['sequence_number'] + 1})
session.refresh()
if not float_is_zero(order['amount_return'], self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')):
cash_journal = session.cash_journal_id.id
if not cash_journal:
# Select for change one of the cash journals used in this payment
cash_journal_ids = self.pool['account.journal'].search(cr, uid, [
('type', '=', 'cash'),
('id', 'in', list(journal_ids)),
], limit=1, context=context)
if not cash_journal_ids:
# If none, select for change one of the cash journals of the POS
# This is used for example when a customer pays by credit card
# an amount higher than total amount of the order and gets cash back
cash_journal_ids = [statement.journal_id.id for statement in session.statement_ids
if statement.journal_id.type == 'cash']
if not cash_journal_ids:
raise osv.except_osv(_('Error!'),
_("No cash statement found for this session. Unable to record returned cash."))
cash_journal = cash_journal_ids[0]
self.add_payment(cr, uid, order_id, {
'amount': -order['amount_return'],
'payment_date': time.strftime('%Y-%m-%d %H:%M:%S'),
'payment_name': _('return'),
'journal': cash_journal,
}, context=context)
return order_id
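# Entry point called by the web client. `orders` is a list of wrappers around the
# ui order data; a minimal sketch of the expected shape, inferred from the fields
# read below (values are purely illustrative, anything not read below is an
# assumption):
#
#   orders = [{
#       'to_invoice': False,
#       'data': {
#           'name': 'Order 00001-001-0001',     # receipt reference (pos_reference)
#           'pos_session_id': 1,
#           'user_id': 1,
#           'partner_id': False,
#           'sequence_number': 1,
#           'amount_return': 0.0,
#           'lines': [...],                     # one2many commands for pos.order.line
#           'statement_ids': [(0, 0, {'amount': 10.0, 'name': '2014-01-01 10:00:00',
#                                     'statement_id': 5, 'journal_id': 3})],
#       },
#   }]
#
# Orders whose pos_reference already exists are skipped; a failure while signalling
# the 'paid' workflow is logged but does not abort the import.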
def create_from_ui(self, cr, uid, orders, context=None):
# Keep only new orders
submitted_references = [o['data']['name'] for o in orders]
existing_order_ids = self.search(cr, uid, [('pos_reference', 'in', submitted_references)], context=context)
existing_orders = self.read(cr, uid, existing_order_ids, ['pos_reference'], context=context)
existing_references = set([o['pos_reference'] for o in existing_orders])
orders_to_save = [o for o in orders if o['data']['name'] not in existing_references]
order_ids = []
for tmp_order in orders_to_save:
to_invoice = tmp_order['to_invoice']
order = tmp_order['data']
order_id = self._process_order(cr, uid, order, context=context)
order_ids.append(order_id)
try:
self.signal_workflow(cr, uid, [order_id], 'paid')
except Exception as e:
_logger.error('Could not fully process the POS Order: %s', tools.ustr(e))
if to_invoice:
self.action_invoice(cr, uid, [order_id], context)
order_obj = self.browse(cr, uid, order_id, context)
self.pool['account.invoice'].signal_workflow(cr, uid, [order_obj.invoice_id.id], 'invoice_open')
return order_ids
def write(self, cr, uid, ids, vals, context=None):
res = super(pos_order, self).write(cr, uid, ids, vals, context=context)
#If you change the partner of the PoS order, change also the partner of the associated bank statement lines
partner_obj = self.pool.get('res.partner')
bsl_obj = self.pool.get("account.bank.statement.line")
if 'partner_id' in vals:
for posorder in self.browse(cr, uid, ids, context=context):
if posorder.invoice_id:
raise osv.except_osv( _('Error!'), _("You cannot change the partner of a POS order for which an invoice has already been issued."))
if vals['partner_id']:
p_id = partner_obj.browse(cr, uid, vals['partner_id'], context=context)
part_id = partner_obj._find_accounting_partner(p_id).id
else:
part_id = False
bsl_ids = [x.id for x in posorder.statement_ids]
bsl_obj.write(cr, uid, bsl_ids, {'partner_id': part_id}, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ('draft','cancel'):
raise osv.except_osv(_('Unable to Delete!'), _('In order to delete a sale, it must be new or cancelled.'))
return super(pos_order, self).unlink(cr, uid, ids, context=context)
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
if not part:
return {'value': {}}
pricelist = self.pool.get('res.partner').browse(cr, uid, part, context=context).property_product_pricelist.id
return {'value': {'pricelist_id': pricelist}}
def _amount_all(self, cr, uid, ids, name, args, context=None):
cur_obj = self.pool.get('res.currency')
res = {}
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_paid': 0.0,
'amount_return':0.0,
'amount_tax':0.0,
}
val1 = val2 = 0.0
cur = order.pricelist_id.currency_id
for payment in order.statement_ids:
res[order.id]['amount_paid'] += payment.amount
res[order.id]['amount_return'] += (payment.amount < 0 and payment.amount or 0)
for line in order.lines:
val1 += self._amount_line_tax(cr, uid, line, context=context)
val2 += line.price_subtotal
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val1)
amount_untaxed = cur_obj.round(cr, uid, cur, val2)
res[order.id]['amount_total'] = res[order.id]['amount_tax'] + amount_untaxed
return res
_columns = {
'name': fields.char('Order Ref', required=True, readonly=True, copy=False),
'company_id':fields.many2one('res.company', 'Company', required=True, readonly=True),
'date_order': fields.datetime('Order Date', readonly=True, select=True),
'user_id': fields.many2one('res.users', 'Salesman', help="Person who uses the cash register. It can be a reliever, a student or an interim employee."),
'amount_tax': fields.function(_amount_all, string='Taxes', digits_compute=dp.get_precision('Account'), multi='all'),
'amount_total': fields.function(_amount_all, string='Total', digits_compute=dp.get_precision('Account'), multi='all'),
'amount_paid': fields.function(_amount_all, string='Paid', states={'draft': [('readonly', False)]}, readonly=True, digits_compute=dp.get_precision('Account'), multi='all'),
'amount_return': fields.function(_amount_all, 'Returned', digits_compute=dp.get_precision('Account'), multi='all'),
'lines': fields.one2many('pos.order.line', 'order_id', 'Order Lines', states={'draft': [('readonly', False)]}, readonly=True, copy=True),
'statement_ids': fields.one2many('account.bank.statement.line', 'pos_statement_id', 'Payments', states={'draft': [('readonly', False)]}, readonly=True),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', required=True, states={'draft': [('readonly', False)]}, readonly=True),
'partner_id': fields.many2one('res.partner', 'Customer', change_default=True, select=1, states={'draft': [('readonly', False)], 'paid': [('readonly', False)]}),
'sequence_number': fields.integer('Sequence Number', help='A session-unique sequence number for the order'),
'session_id' : fields.many2one('pos.session', 'Session',
#required=True,
select=1,
domain="[('state', '=', 'opened')]",
states={'draft' : [('readonly', False)]},
readonly=True),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('paid', 'Paid'),
('done', 'Posted'),
('invoiced', 'Invoiced')],
'Status', readonly=True, copy=False),
'invoice_id': fields.many2one('account.invoice', 'Invoice', copy=False),
'account_move': fields.many2one('account.move', 'Journal Entry', readonly=True, copy=False),
'picking_id': fields.many2one('stock.picking', 'Picking', readonly=True, copy=False),
'picking_type_id': fields.related('session_id', 'config_id', 'picking_type_id', string="Picking Type", type='many2one', relation='stock.picking.type'),
'location_id': fields.related('session_id', 'config_id', 'stock_location_id', string="Location", type='many2one', store=True, relation='stock.location'),
'note': fields.text('Internal Notes'),
'nb_print': fields.integer('Number of Print', readonly=True, copy=False),
'pos_reference': fields.char('Receipt Ref', readonly=True, copy=False),
'sale_journal': fields.related('session_id', 'config_id', 'journal_id', relation='account.journal', type='many2one', string='Sale Journal', store=True, readonly=True),
}
def _default_session(self, cr, uid, context=None):
so = self.pool.get('pos.session')
session_ids = so.search(cr, uid, [('state','=', 'opened'), ('user_id','=',uid)], context=context)
return session_ids and session_ids[0] or False
def _default_pricelist(self, cr, uid, context=None):
session_ids = self._default_session(cr, uid, context)
if session_ids:
session_record = self.pool.get('pos.session').browse(cr, uid, session_ids, context=context)
return session_record.config_id.pricelist_id and session_record.config_id.pricelist_id.id or False
return False
def _get_out_picking_type(self, cr, uid, context=None):
return self.pool.get('ir.model.data').xmlid_to_res_id(
cr, uid, 'point_of_sale.picking_type_posout', context=context)
_defaults = {
'user_id': lambda self, cr, uid, context: uid,
'state': 'draft',
'name': '/',
'date_order': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'nb_print': 0,
'sequence_number': 1,
'session_id': _default_session,
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
'pricelist_id': _default_pricelist,
}
def create(self, cr, uid, values, context=None):
if values.get('session_id'):
# set name based on the sequence specified on the config
session = self.pool['pos.session'].browse(cr, uid, values['session_id'], context=context)
values['name'] = session.config_id.sequence_id._next()
else:
# fallback on any pos.order sequence
values['name'] = self.pool.get('ir.sequence').get_id(cr, uid, 'pos.order', 'code', context=context)
return super(pos_order, self).create(cr, uid, values, context=context)
def test_paid(self, cr, uid, ids, context=None):
"""A Point of Sale is paid when the sum
@return: True
"""
for order in self.browse(cr, uid, ids, context=context):
if order.lines and not order.amount_total:
return True
if (not order.lines) or (not order.statement_ids) or \
(abs(order.amount_total-order.amount_paid) > 0.00001):
return False
return True
def create_picking(self, cr, uid, ids, context=None):
"""Create a picking for each order and validate it."""
picking_obj = self.pool.get('stock.picking')
partner_obj = self.pool.get('res.partner')
move_obj = self.pool.get('stock.move')
for order in self.browse(cr, uid, ids, context=context):
if all(t == 'service' for t in order.lines.mapped('product_id.type')):
continue
addr = order.partner_id and partner_obj.address_get(cr, uid, [order.partner_id.id], ['delivery']) or {}
picking_type = order.picking_type_id
picking_id = False
if picking_type:
picking_id = picking_obj.create(cr, uid, {
'origin': order.name,
'partner_id': addr.get('delivery',False),
'date_done' : order.date_order,
'picking_type_id': picking_type.id,
'company_id': order.company_id.id,
'move_type': 'direct',
'note': order.note or "",
'invoice_state': 'none',
}, context=context)
self.write(cr, uid, [order.id], {'picking_id': picking_id}, context=context)
location_id = order.location_id.id
if order.partner_id:
destination_id = order.partner_id.property_stock_customer.id
elif picking_type:
if not picking_type.default_location_dest_id:
raise osv.except_osv(_('Error!'), _('Missing source or destination location for picking type %s. Please configure those fields and try again.') % (picking_type.name,))
destination_id = picking_type.default_location_dest_id.id
else:
destination_id = partner_obj.default_get(cr, uid, ['property_stock_customer'], context=context)['property_stock_customer']
move_list = []
for line in order.lines:
if line.product_id and line.product_id.type == 'service':
continue
move_list.append(move_obj.create(cr, uid, {
'name': line.name,
'product_uom': line.product_id.uom_id.id,
'product_uos': line.product_id.uom_id.id,
'picking_id': picking_id,
'picking_type_id': picking_type.id,
'product_id': line.product_id.id,
'product_uos_qty': abs(line.qty),
'product_uom_qty': abs(line.qty),
'state': 'draft',
'location_id': location_id if line.qty >= 0 else destination_id,
'location_dest_id': destination_id if line.qty >= 0 else location_id,
}, context=context))
if picking_id:
picking_obj.action_confirm(cr, uid, [picking_id], context=context)
picking_obj.force_assign(cr, uid, [picking_id], context=context)
picking_obj.action_done(cr, uid, [picking_id], context=context)
elif move_list:
move_obj.action_confirm(cr, uid, move_list, context=context)
move_obj.force_assign(cr, uid, move_list, context=context)
move_obj.action_done(cr, uid, move_list, context=context)
return True
def cancel_order(self, cr, uid, ids, context=None):
""" Changes order state to cancel
@return: True
"""
stock_picking_obj = self.pool.get('stock.picking')
for order in self.browse(cr, uid, ids, context=context):
stock_picking_obj.action_cancel(cr, uid, [order.picking_id.id])
if stock_picking_obj.browse(cr, uid, order.picking_id.id, context=context).state != 'cancel':
raise osv.except_osv(_('Error!'), _('Unable to cancel the picking.'))
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
return True
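# `data` is a small dict describing one payment; a sketch of the keys actually read
# below (values are illustrative): {'amount': 10.0, 'payment_date': '2014-01-01',
# 'payment_name': 'Cash', 'journal': <journal id>} or, alternatively,
# {'statement_id': <statement id>} - at least one of 'journal' / 'statement_id'
# is required (see the assert below).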
def add_payment(self, cr, uid, order_id, data, context=None):
"""Create a new payment for the order"""
context = dict(context or {})
statement_line_obj = self.pool.get('account.bank.statement.line')
property_obj = self.pool.get('ir.property')
order = self.browse(cr, uid, order_id, context=context)
date = data.get('payment_date', time.strftime('%Y-%m-%d'))
if len(date) > 10:
timestamp = datetime.strptime(date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
ts = fields.datetime.context_timestamp(cr, uid, timestamp, context)
date = ts.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
args = {
'amount': data['amount'],
'date': date,
'name': order.name + ': ' + (data.get('payment_name', '') or ''),
'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False,
}
journal_id = data.get('journal', False)
statement_id = data.get('statement_id', False)
assert journal_id or statement_id, "No statement_id or journal_id passed to the method!"
journal = self.pool['account.journal'].browse(cr, uid, journal_id, context=context)
# use the company of the journal and not of the current user
company_cxt = dict(context, force_company=journal.company_id.id)
account_def = property_obj.get(cr, uid, 'property_account_receivable', 'res.partner', context=company_cxt)
args['account_id'] = (order.partner_id and order.partner_id.property_account_receivable \
and order.partner_id.property_account_receivable.id) or (account_def and account_def.id) or False
if not args['account_id']:
if not args['partner_id']:
msg = _('There is no receivable account defined to make payment.')
else:
msg = _('There is no receivable account defined to make payment for the partner: "%s" (id:%d).') % (order.partner_id.name, order.partner_id.id,)
raise osv.except_osv(_('Configuration Error!'), msg)
context.pop('pos_session_id', False)
for statement in order.session_id.statement_ids:
if statement.id == statement_id:
journal_id = statement.journal_id.id
break
elif statement.journal_id.id == journal_id:
statement_id = statement.id
break
if not statement_id:
raise osv.except_osv(_('Error!'), _('You have to open at least one cashbox.'))
args.update({
'statement_id': statement_id,
'pos_statement_id': order_id,
'journal_id': journal_id,
'ref': order.session_id.name,
})
statement_line_obj.create(cr, uid, args, context=context)
return statement_id
def refund(self, cr, uid, ids, context=None):
"""Create a copy of order for refund order"""
clone_list = []
line_obj = self.pool.get('pos.order.line')
for order in self.browse(cr, uid, ids, context=context):
current_session_ids = self.pool.get('pos.session').search(cr, uid, [
('state', '!=', 'closed'),
('user_id', '=', uid)], context=context)
if not current_session_ids:
raise osv.except_osv(_('Error!'), _('To return product(s), you need to open a session that will be used to register the refund.'))
clone_id = self.copy(cr, uid, order.id, {
'name': order.name + ' REFUND', # not used, name forced by create
'session_id': current_session_ids[0],
'date_order': time.strftime('%Y-%m-%d %H:%M:%S'),
}, context=context)
clone_list.append(clone_id)
for clone in self.browse(cr, uid, clone_list, context=context):
for order_line in clone.lines:
line_obj.write(cr, uid, [order_line.id], {
'qty': -order_line.qty
}, context=context)
abs = {
'name': _('Return Products'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'pos.order',
'res_id':clone_list[0],
'view_id': False,
'context':context,
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
}
return abs
def action_invoice_state(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state':'invoiced'}, context=context)
def action_invoice(self, cr, uid, ids, context=None):
inv_ref = self.pool.get('account.invoice')
inv_line_ref = self.pool.get('account.invoice.line')
product_obj = self.pool.get('product.product')
inv_ids = []
for order in self.pool.get('pos.order').browse(cr, uid, ids, context=context):
if order.invoice_id:
inv_ids.append(order.invoice_id.id)
continue
if not order.partner_id:
raise osv.except_osv(_('Error!'), _('Please provide a partner for the sale.'))
acc = order.partner_id.property_account_receivable.id
inv = {
'name': order.name,
'origin': order.name,
'account_id': acc,
'journal_id': order.sale_journal.id or None,
'type': 'out_invoice',
'reference': order.name,
'partner_id': order.partner_id.id,
'comment': order.note or '',
'currency_id': order.pricelist_id.currency_id.id, # considering partner's sale pricelist's currency
}
inv.update(inv_ref.onchange_partner_id(cr, uid, [], 'out_invoice', order.partner_id.id)['value'])
if not inv.get('account_id', None):
inv['account_id'] = acc
inv_id = inv_ref.create(cr, uid, inv, context=context)
self.write(cr, uid, [order.id], {'invoice_id': inv_id, 'state': 'invoiced'}, context=context)
inv_ids.append(inv_id)
for line in order.lines:
inv_line = {
'invoice_id': inv_id,
'product_id': line.product_id.id,
'quantity': line.qty,
}
inv_name = product_obj.name_get(cr, uid, [line.product_id.id], context=context)[0][1]
inv_line.update(inv_line_ref.product_id_change(cr, uid, [],
line.product_id.id,
line.product_id.uom_id.id,
line.qty, partner_id = order.partner_id.id,
fposition_id=order.partner_id.property_account_position.id)['value'])
if not inv_line.get('account_analytic_id', False):
inv_line['account_analytic_id'] = \
self._prepare_analytic_account(cr, uid, line,
context=context)
inv_line['price_unit'] = line.price_unit
inv_line['discount'] = line.discount
inv_line['name'] = inv_name
inv_line['invoice_line_tax_id'] = [(6, 0, inv_line['invoice_line_tax_id'])]
inv_line_ref.create(cr, uid, inv_line, context=context)
inv_ref.button_reset_taxes(cr, uid, [inv_id], context=context)
self.signal_workflow(cr, uid, [order.id], 'invoice')
inv_ref.signal_workflow(cr, uid, [inv_id], 'validate')
if not inv_ids: return {}
mod_obj = self.pool.get('ir.model.data')
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
res_id = res and res[1] or False
return {
'name': _('Customer Invoice'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'out_invoice'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def create_account_move(self, cr, uid, ids, context=None):
return self._create_account_move_line(cr, uid, ids, None, None, context=context)
def _prepare_analytic_account(self, cr, uid, line, context=None):
'''This method is designed to be inherited in a custom module'''
return False
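# Create the (still empty) account.move that will receive the journal items: the
# session's start datetime is converted to the user's timezone to pick the right
# accounting period, and the move is created on the POS sale journal.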
def _create_account_move(self, cr, uid, dt, ref, journal_id, company_id, context=None):
local_context = dict(context or {}, company_id=company_id)
start_at_datetime = datetime.strptime(dt, tools.DEFAULT_SERVER_DATETIME_FORMAT)
date_tz_user = fields.datetime.context_timestamp(cr, uid, start_at_datetime, context=context)
date_tz_user = date_tz_user.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
period_id = self.pool['account.period'].find(cr, uid, dt=date_tz_user, context=local_context)
return self.pool['account.move'].create(cr, uid, {'ref': ref, 'journal_id': journal_id, 'period_id': period_id[0]}, context=context)
def _create_account_move_line(self, cr, uid, ids, session=None, move_id=None, context=None):
# Tricky, via the workflow, we only have one id in the ids variable
"""Create a account move line of order grouped by products or not."""
account_move_obj = self.pool.get('account.move')
account_period_obj = self.pool.get('account.period')
account_tax_obj = self.pool.get('account.tax')
property_obj = self.pool.get('ir.property')
cur_obj = self.pool.get('res.currency')
#session_ids = set(order.session_id for order in self.browse(cr, uid, ids, context=context))
if session and not all(session.id == order.session_id.id for order in self.browse(cr, uid, ids, context=context)):
raise osv.except_osv(_('Error!'), _('Selected orders do not have the same session!'))
grouped_data = {}
have_to_group_by = session and session.config_id.group_by or False
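# Helper: for one order line and one computed tax, return the (tax base code,
# signed base amount) pair, using the refund base code and sign when the line
# amount is negative.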
def compute_tax(amount, tax, line):
if amount > 0:
tax_code_id = tax['base_code_id']
tax_amount = line.price_subtotal * tax['base_sign']
else:
tax_code_id = tax['ref_base_code_id']
tax_amount = -line.price_subtotal * tax['ref_base_sign']
return (tax_code_id, tax_amount,)
for order in self.browse(cr, uid, ids, context=context):
if order.account_move:
continue
if order.state != 'paid':
continue
current_company = order.sale_journal.company_id
group_tax = {}
account_def = property_obj.get(cr, uid, 'property_account_receivable', 'res.partner', context=context)
order_account = order.partner_id and \
order.partner_id.property_account_receivable and \
order.partner_id.property_account_receivable.id or \
account_def and account_def.id
if move_id is None:
# Create an entry for the sale
move_id = self._create_account_move(cr, uid, order.session_id.start_at, order.name, order.sale_journal.id, order.company_id.id, context=context)
move = account_move_obj.browse(cr, uid, move_id, context=context)
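# Helper that stamps the shared move-line values (date, ref, partner, journal,
# period, move, company) on `values` and accumulates it into grouped_data; when
# the config's "Group Journal Items" option is set, lines sharing the same
# grouping key and tax code are merged (quantities, debit, credit and tax_amount
# are summed) instead of being appended.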
def insert_data(data_type, values):
# if have_to_group_by:
sale_journal_id = order.sale_journal.id
# 'quantity': line.qty,
# 'product_id': line.product_id.id,
values.update({
'date': order.date_order[:10],
'ref': order.name,
'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False,
'journal_id' : sale_journal_id,
'period_id': move.period_id.id,
'move_id' : move_id,
'company_id': current_company.id,
})
if data_type == 'product':
key = ('product', values['partner_id'], values['product_id'], values['analytic_account_id'], values['debit'] > 0)
elif data_type == 'tax':
key = ('tax', values['partner_id'], values['tax_code_id'], values['debit'] > 0)
elif data_type == 'counter_part':
key = ('counter_part', values['partner_id'], values['account_id'], values['debit'] > 0)
else:
return
grouped_data.setdefault(key, [])
# if not have_to_group_by or (not grouped_data[key]):
# grouped_data[key].append(values)
# else:
# pass
if have_to_group_by:
if not grouped_data[key]:
grouped_data[key].append(values)
else:
for line in grouped_data[key]:
if line.get('tax_code_id') == values.get('tax_code_id'):
current_value = line
current_value['quantity'] = current_value.get('quantity', 0.0) + values.get('quantity', 0.0)
current_value['credit'] = current_value.get('credit', 0.0) + values.get('credit', 0.0)
current_value['debit'] = current_value.get('debit', 0.0) + values.get('debit', 0.0)
current_value['tax_amount'] = current_value.get('tax_amount', 0.0) + values.get('tax_amount', 0.0)
break
else:
grouped_data[key].append(values)
else:
grouped_data[key].append(values)
#because of the weird way the pos order is written, we need to make sure there is at least one line,
#because just after the 'for' loop there are references to 'line' and 'income_account' variables (that
#are set inside the for loop)
#TOFIX: a deep refactoring of this method (and class!) is needed in order to get rid of this stupid hack
assert order.lines, _('The POS order must have lines when calling this method')
# Create a move for each order line
cur = order.pricelist_id.currency_id
round_per_line = True
if order.company_id.tax_calculation_rounding_method == 'round_globally':
round_per_line = False
for line in order.lines:
tax_amount = 0
taxes = []
for t in line.product_id.taxes_id:
if t.company_id.id == current_company.id:
taxes.append(t)
computed_taxes = account_tax_obj.compute_all(cr, uid, taxes, line.price_unit * (100.0-line.discount) / 100.0, line.qty)['taxes']
for tax in computed_taxes:
tax_amount += cur_obj.round(cr, uid, cur, tax['amount']) if round_per_line else tax['amount']
if tax_amount < 0:
group_key = (tax['ref_tax_code_id'], tax['base_code_id'], tax['account_collected_id'], tax['id'])
else:
group_key = (tax['tax_code_id'], tax['base_code_id'], tax['account_collected_id'], tax['id'])
group_tax.setdefault(group_key, 0)
group_tax[group_key] += cur_obj.round(cr, uid, cur, tax['amount']) if round_per_line else tax['amount']
amount = line.price_subtotal
# Search for the income account
if line.product_id.property_account_income.id:
income_account = line.product_id.property_account_income.id
elif line.product_id.categ_id.property_account_income_categ.id:
income_account = line.product_id.categ_id.property_account_income_categ.id
else:
raise osv.except_osv(_('Error!'), _('Please define income '\
'account for this product: "%s" (id:%d).') \
% (line.product_id.name, line.product_id.id, ))
# Empty the tax list as long as there is no tax code:
tax_code_id = False
tax_amount = 0
while computed_taxes:
tax = computed_taxes.pop(0)
tax_code_id, tax_amount = compute_tax(amount, tax, line)
# If there is one we stop
if tax_code_id:
break
# Create a move for the line
insert_data('product', {
'name': line.product_id.name,
'quantity': line.qty,
'product_id': line.product_id.id,
'account_id': income_account,
'analytic_account_id': self._prepare_analytic_account(cr, uid, line, context=context),
'credit': ((amount>0) and amount) or 0.0,
'debit': ((amount<0) and -amount) or 0.0,
'tax_code_id': tax_code_id,
'tax_amount': tax_amount,
'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False
})
# For each remaining tax with a code, we create a move line
for tax in computed_taxes:
tax_code_id, tax_amount = compute_tax(amount, tax, line)
if not tax_code_id:
continue
insert_data('tax', {
'name': _('Tax'),
'product_id':line.product_id.id,
'quantity': line.qty,
'account_id': income_account,
'credit': 0.0,
'debit': 0.0,
'tax_code_id': tax_code_id,
'tax_amount': tax_amount,
'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False
})
# Create a move for each tax group
(tax_code_pos, base_code_pos, account_pos, tax_id)= (0, 1, 2, 3)
for key, tax_amount in group_tax.items():
tax = self.pool.get('account.tax').browse(cr, uid, key[tax_id], context=context)
insert_data('tax', {
'name': _('Tax') + ' ' + tax.name,
'quantity': line.qty,
'product_id': line.product_id.id,
'account_id': key[account_pos] or income_account,
'credit': ((tax_amount>0) and tax_amount) or 0.0,
'debit': ((tax_amount<0) and -tax_amount) or 0.0,
'tax_code_id': key[tax_code_pos],
'tax_amount': tax_amount,
'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False
})
# counterpart
insert_data('counter_part', {
'name': _("Trade Receivables"), #order.name,
'account_id': order_account,
'credit': ((order.amount_total < 0) and -order.amount_total) or 0.0,
'debit': ((order.amount_total > 0) and order.amount_total) or 0.0,
'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False
})
order.write({'state':'done', 'account_move': move_id})
all_lines = []
for group_key, group_data in grouped_data.iteritems():
for value in group_data:
all_lines.append((0, 0, value),)
if move_id: #In case no order was changed
self.pool.get("account.move").write(cr, uid, [move_id], {'line_id':all_lines}, context=context)
return True
def action_payment(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'payment'}, context=context)
def action_paid(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'paid'}, context=context)
self.create_picking(cr, uid, ids, context=context)
return True
def action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
return True
def action_done(self, cr, uid, ids, context=None):
self.create_account_move(cr, uid, ids, context=context)
return True
class account_bank_statement(osv.osv):
_inherit = 'account.bank.statement'
_columns= {
'user_id': fields.many2one('res.users', 'User', readonly=True),
}
_defaults = {
'user_id': lambda self,cr,uid,c={}: uid
}
class account_bank_statement_line(osv.osv):
_inherit = 'account.bank.statement.line'
_columns= {
'pos_statement_id': fields.many2one('pos.order', ondelete='cascade'),
}
class pos_order_line(osv.osv):
_name = "pos.order.line"
_description = "Lines of Point of Sale"
_rec_name = "product_id"
def _amount_line_all(self, cr, uid, ids, field_names, arg, context=None):
res = dict([(i, {}) for i in ids])
account_tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids, context=context):
taxes_ids = [ tax for tax in line.product_id.taxes_id if tax.company_id.id == line.order_id.company_id.id ]
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = account_tax_obj.compute_all(cr, uid, taxes_ids, price, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)
cur = line.order_id.pricelist_id.currency_id
res[line.id]['price_subtotal'] = taxes['total']
res[line.id]['price_subtotal_incl'] = taxes['total_included']
return res
def onchange_product_id(self, cr, uid, ids, pricelist, product_id, qty=0, partner_id=False, context=None):
context = context or {}
if not product_id:
return {}
if not pricelist:
raise osv.except_osv(_('No Pricelist!'),
_('You have to select a pricelist in the sale form !\n' \
'Please set one before choosing a product.'))
price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
product_id, qty or 1.0, partner_id)[pricelist]
result = self.onchange_qty(cr, uid, ids, product_id, 0.0, qty, price, context=context)
result['value']['price_unit'] = price
return result
def onchange_qty(self, cr, uid, ids, product, discount, qty, price_unit, context=None):
result = {}
if not product:
return result
account_tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
prod = self.pool.get('product.product').browse(cr, uid, product, context=context)
price = price_unit * (1 - (discount or 0.0) / 100.0)
taxes = account_tax_obj.compute_all(cr, uid, prod.taxes_id, price, qty, product=prod, partner=False)
result['price_subtotal'] = taxes['total']
result['price_subtotal_incl'] = taxes['total_included']
return {'value': result}
_columns = {
'company_id': fields.many2one('res.company', 'Company', required=True),
'name': fields.char('Line No', required=True, copy=False),
'notice': fields.char('Discount Notice'),
'product_id': fields.many2one('product.product', 'Product', domain=[('sale_ok', '=', True)], required=True, change_default=True),
'price_unit': fields.float(string='Unit Price', digits_compute=dp.get_precision('Product Price')),
'qty': fields.float('Quantity', digits_compute=dp.get_precision('Product UoS')),
'price_subtotal': fields.function(_amount_line_all, multi='pos_order_line_amount', digits_compute=dp.get_precision('Product Price'), string='Subtotal w/o Tax', store=True),
'price_subtotal_incl': fields.function(_amount_line_all, multi='pos_order_line_amount', digits_compute=dp.get_precision('Account'), string='Subtotal', store=True),
'discount': fields.float('Discount (%)', digits_compute=dp.get_precision('Account')),
'order_id': fields.many2one('pos.order', 'Order Ref', ondelete='cascade'),
'create_date': fields.datetime('Creation Date', readonly=True),
}
_defaults = {
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'pos.order.line', context=context),
'qty': lambda *a: 1,
'discount': lambda *a: 0.0,
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
class ean_wizard(osv.osv_memory):
_name = 'pos.ean_wizard'
_columns = {
'ean13_pattern': fields.char('Reference', size=13, required=True, translate=True),
}
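# Normalize the typed pattern into a valid EAN13 via product.sanitize_ean13
# (which, presumably, zero-pads and fixes the check digit) and write it back on
# the 'ean13' field of the record this wizard was opened from (active_model /
# active_id in the context).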
def sanitize_ean13(self, cr, uid, ids, context):
for r in self.browse(cr,uid,ids):
ean13 = openerp.addons.product.product.sanitize_ean13(r.ean13_pattern)
m = context.get('active_model')
m_id = context.get('active_id')
self.pool[m].write(cr,uid,[m_id],{'ean13':ean13})
return { 'type' : 'ir.actions.act_window_close' }
class pos_category(osv.osv):
_name = "pos.category"
_description = "Public Category"
_order = "sequence, name"
_constraints = [
(osv.osv._check_recursion, 'Error! You cannot create recursive categories.', ['parent_id'])
]
def name_get(self, cr, uid, ids, context=None):
res = []
for cat in self.browse(cr, uid, ids, context=context):
names = [cat.name]
pcat = cat.parent_id
while pcat:
names.append(pcat.name)
pcat = pcat.parent_id
res.append((cat.id, ' / '.join(reversed(names))))
return res
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Name', required=True, translate=True),
'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
'parent_id': fields.many2one('pos.category','Parent Category', select=True),
'child_id': fields.one2many('pos.category', 'parent_id', string='Children Categories'),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of product categories."),
# NOTE: there is no 'default image', because by default we don't show thumbnails for categories. However if we have a thumbnail
# for at least one category, then we display a default image on the other, so that the buttons have consistent styling.
# In this case, the default image is set by the js code.
# NOTE2: image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Image",
help="This field holds the image used as image for the cateogry, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'pos.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of the category. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Smal-sized image", type="binary", multi="_get_image",
store={
'pos.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized image of the category. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
}
class product_template(osv.osv):
_inherit = 'product.template'
_columns = {
'income_pdt': fields.boolean('Point of Sale Cash In', help="Check if this is a product you can use to put cash into a statement for the point of sale backend."),
'expense_pdt': fields.boolean('Point of Sale Cash Out', help="Check if this is a product you can use to take cash from a statement for the point of sale backend, example: money lost, transfer to bank, etc."),
'available_in_pos': fields.boolean('Available in the Point of Sale', help='Check if you want this product to appear in the Point of Sale'),
'to_weight' : fields.boolean('To Weigh With Scale', help="Check if the product should be weighted using the hardware scale integration"),
'pos_categ_id': fields.many2one('pos.category','Point of Sale Category', help="Those categories are used to group similar products for point of sale."),
}
_defaults = {
'to_weight' : False,
'available_in_pos': True,
}
def unlink(self, cr, uid, ids, context=None):
product_ctx = dict(context or {}, active_test=False)
if self.search_count(cr, uid, [('id', 'in', ids), ('available_in_pos', '=', True)], context=product_ctx):
if self.pool['pos.session'].search_count(cr, uid, [('state', '!=', 'closed')], context=context):
raise osv.except_osv(_('Error!'),
_('You cannot delete a product saleable in the point of sale while a session is still open.'))
return super(product_template, self).unlink(cr, uid, ids, context=context)
class res_partner(osv.osv):
_inherit = 'res.partner'
def create_from_ui(self, cr, uid, partner, context=None):
""" create or modify a partner from the point of sale ui.
partner contains the partner's fields. """
#image is a dataurl, get the data after the comma
if partner.get('image',False):
img = partner['image'].split(',')[1]
partner['image'] = img
if partner.get('id',False): # Modifying existing partner
partner_id = partner['id']
del partner['id']
self.write(cr, uid, [partner_id], partner, context=context)
else:
partner_id = self.create(cr, uid, partner, context=context)
return partner_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jaxlaw/hadoop-common
|
refs/heads/yahoo-hadoop-0.20
|
src/contrib/hod/hodlib/Common/xmlrpc.py
|
182
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import xmlrpclib, time, random, signal
from hodlib.Common.util import hodInterrupt, HodInterruptException
class hodXRClient(xmlrpclib.ServerProxy):
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=0, installSignalHandlers=1, retryRequests=True, timeOut=15):
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, verbose,
allow_none)
self.__retryRequests = retryRequests
self.__timeOut = timeOut
if (installSignalHandlers!=0):
self.__set_alarm()
def __set_alarm(self):
def alarm_handler(sigNum, sigHandler):
raise Exception("XML-RPC socket timeout.")
signal.signal(signal.SIGALRM, alarm_handler)
def __request(self, methodname, params):
response = None
retryWaitTime = 5 + random.randint(0, 5)
for i in range(0, 30):
signal.alarm(self.__timeOut)
try:
response = self._ServerProxy__request(methodname, params)
signal.alarm(0)
break
except Exception:
if self.__retryRequests:
if hodInterrupt.isSet():
raise HodInterruptException()
time.sleep(retryWaitTime)
else:
raise Exception("hodXRClientTimeout")
return response
def __getattr__(self, name):
# magic method dispatcher
return xmlrpclib._Method(self.__request, name)
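# Usage sketch (an illustration; the URI and the method name below are hypothetical):
#
#   client = hodXRClient('http://localhost:8000', retryRequests=False, timeOut=30)
#   info = client.getServiceInfo()   # any attribute access is dispatched via __request
#
# Note that the timeout relies on signal.alarm()/SIGALRM, so it only works in the
# main thread on POSIX platforms. With retryRequests=True a failed call is retried
# up to 30 times, sleeping a randomized 5-10 seconds between attempts.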
|
f4nt/gmecol
|
refs/heads/master
|
gmecol/migrations/0005_auto_20140926_2339.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gmecol', '0004_auto_20140926_0130'),
]
operations = [
migrations.AlterModelOptions(
name='usergame',
options={'ordering': ('game__name',)},
),
]
|
skycucumber/restful
|
refs/heads/master
|
python/venv/lib/python2.7/site-packages/werkzeug/_compat.py
|
148
|
import sys
import operator
import functools
try:
import builtins
except ImportError:
import __builtin__ as builtins
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if PY2:
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
int_to_byte = chr
iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
iter_bytes = lambda x: iter(x)
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def fix_tuple_repr(obj):
def __repr__(self):
cls = self.__class__
return '%s(%s)' % (cls.__name__, ', '.join(
'%s=%r' % (field, self[index])
for index, field in enumerate(cls._fields)
))
obj.__repr__ = __repr__
return obj
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def native_string_result(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs).encode('utf-8')
return functools.update_wrapper(wrapper, func)
def implements_bool(cls):
cls.__nonzero__ = cls.__bool__
del cls.__bool__
return cls
from itertools import imap, izip, ifilter
range_type = xrange
from StringIO import StringIO
from cStringIO import StringIO as BytesIO
NativeStringIO = BytesIO
def make_literal_wrapper(reference):
return lambda x: x
def normalize_string_tuple(tup):
"""Normalizes a string tuple to a common type. Following Python 2
rules, upgrades to unicode are implicit.
"""
if any(isinstance(x, text_type) for x in tup):
return tuple(to_unicode(x) for x in tup)
return tup
def try_coerce_native(s):
"""Try to coerce a unicode string to native if possible. Otherwise,
leave it as unicode.
"""
try:
return to_native(s)
except UnicodeError:
return s
wsgi_get_bytes = _identity
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s
return s.encode(charset, errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.encode(charset, errors)
else:
unichr = chr
text_type = str
string_types = (str, )
integer_types = (int, )
iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
def iter_bytes(b):
return map(int_to_byte, b)
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
fix_tuple_repr = _identity
implements_iterator = _identity
implements_to_string = _identity
implements_bool = _identity
native_string_result = _identity
imap = map
izip = zip
ifilter = filter
range_type = range
from io import StringIO, BytesIO
NativeStringIO = StringIO
def make_literal_wrapper(reference):
if isinstance(reference, text_type):
return lambda x: x
return lambda x: x.encode('latin1')
def normalize_string_tuple(tup):
"""Ensures that all types in the tuple are either strings
or bytes.
"""
tupiter = iter(tup)
is_text = isinstance(next(tupiter, None), text_type)
for arg in tupiter:
if isinstance(arg, text_type) != is_text:
raise TypeError('Cannot mix str and bytes arguments (got %s)'
% repr(tup))
return tup
try_coerce_native = _identity
def wsgi_get_bytes(s):
return s.encode('latin1')
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.encode('latin1').decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s.decode('latin1', errors)
return s.encode(charset).decode('latin1', errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.decode(charset, errors)
def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
allow_none_charset=False):
if x is None:
return None
if not isinstance(x, bytes):
return text_type(x)
if charset is None and allow_none_charset:
return x
return x.decode(charset, errors)
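# Minimal usage sketch of the helpers above (an illustration, not part of werkzeug):
# the same call sites behave consistently on Python 2 and Python 3.
#
#   header_name = to_native(u'Content-Type')   # native str on both major versions
#   raw_value = to_bytes(u'text/html')         # bytes on both
#   try:
#       raise ValueError('boom')
#   except ValueError:
#       tp, value, tb = sys.exc_info()
#       # reraise(tp, value, tb) would re-raise while preserving the traceback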
|
pauloricardomg/cassandra
|
refs/heads/trunk
|
pylib/cqlshlib/copyutil.py
|
4
|
# cython: profile=True
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import csv
import datetime
import json
import glob
import multiprocessing as mp
import os
import platform
import random
import re
import signal
import six
import struct
import sys
import threading
import time
import traceback
from bisect import bisect_right
from calendar import timegm
from collections import defaultdict, namedtuple
from decimal import Decimal
from random import randint
from io import BytesIO, StringIO
from select import select
from uuid import UUID
from .util import profile_on, profile_off
from six import ensure_str, ensure_text
from six.moves import configparser
from six.moves import range
from six.moves.queue import Queue
from cassandra import OperationTimedOut
from cassandra.cluster import Cluster, DefaultConnection
from cassandra.cqltypes import ReversedType, UserType, BytesType, VarcharType
from cassandra.metadata import protect_name, protect_names, protect_value
from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, FallthroughRetryPolicy
from cassandra.query import BatchStatement, BatchType, SimpleStatement, tuple_factory
from cassandra.util import Date, Time
from cqlshlib.util import profile_on, profile_off
from cqlshlib.cql3handling import CqlRuleSet
from cqlshlib.displaying import NO_COLOR_MAP
from cqlshlib.formatting import format_value_default, CqlType, DateTimeFormat, EMPTY, get_formatter, BlobType
from cqlshlib.sslhandling import ssl_settings
PROFILE_ON = False
STRACE_ON = False
DEBUG = False # This may be set to True when initializing the task
IS_LINUX = platform.system() == 'Linux'
CopyOptions = namedtuple('CopyOptions', 'copy dialect unrecognized')
def safe_normpath(fname):
"""
    :return: the normalized path, but only if there is a filename; we don't want to convert
    an empty string (which means no file name) to a dot. Also expands any user variables such as ~ to the full path
"""
return os.path.normpath(os.path.expanduser(fname)) if fname else fname
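# For example (assuming a POSIX home directory):
#   safe_normpath('~/exports/./rows.csv')  ->  '/home/<user>/exports/rows.csv'
#   safe_normpath('')                      ->  ''    (empty input is returned unchanged)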
def printdebugmsg(msg):
if DEBUG:
printmsg(msg)
def printmsg(msg, eol='\n', encoding='utf8'):
sys.stdout.write(msg)
sys.stdout.write(eol)
sys.stdout.flush()
# Keep arguments in sync with printmsg
def swallowmsg(msg, eol='', encoding=''):
None
class OneWayPipe(object):
"""
A one way pipe protected by two process level locks, one for reading and one for writing.
"""
def __init__(self):
self.reader, self.writer = mp.Pipe(duplex=False)
self.rlock = mp.Lock()
self.wlock = mp.Lock()
def send(self, obj):
with self.wlock:
self.writer.send(obj)
def recv(self):
with self.rlock:
return self.reader.recv()
def close(self):
self.reader.close()
self.writer.close()
class ReceivingChannel(object):
"""
A one way channel that wraps a pipe to receive messages.
"""
def __init__(self, pipe):
self.pipe = pipe
def recv(self):
return self.pipe.recv()
def close(self):
self.pipe.close()
class SendingChannel(object):
"""
A one way channel that wraps a pipe and provides a feeding thread to send messages asynchronously.
"""
def __init__(self, pipe):
self.pipe = pipe
self.pending_messages = Queue()
def feed():
while True:
try:
msg = self.pending_messages.get()
self.pipe.send(msg)
except Exception as e:
printmsg('%s: %s' % (e.__class__.__name__, e.message if hasattr(e, 'message') else str(e)))
feeding_thread = threading.Thread(target=feed)
feeding_thread.setDaemon(True)
feeding_thread.start()
def send(self, obj):
self.pending_messages.put(obj)
def num_pending(self):
return self.pending_messages.qsize() if self.pending_messages else 0
def close(self):
self.pipe.close()
class SendingChannels(object):
"""
A group of one way channels for sending messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in range(num_channels)]
self.channels = [SendingChannel(p) for p in self.pipes]
self.num_channels = num_channels
def close(self):
for ch in self.channels:
try:
ch.close()
except Exception:
pass
class ReceivingChannels(object):
"""
A group of one way channels for receiving messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in range(num_channels)]
self.channels = [ReceivingChannel(p) for p in self.pipes]
self._readers = [p.reader for p in self.pipes]
self._rlocks = [p.rlock for p in self.pipes]
self._rlocks_by_readers = dict([(p.reader, p.rlock) for p in self.pipes])
self.num_channels = num_channels
self.recv = self.recv_select if IS_LINUX else self.recv_polling
def recv_select(self, timeout):
"""
Implementation of the recv method for Linux, where select is available. Receive an object from
all pipes that are ready for reading without blocking.
"""
readable, _, _ = select(self._readers, [], [], timeout)
for r in readable:
with self._rlocks_by_readers[r]:
try:
yield r.recv()
except EOFError:
continue
def recv_polling(self, timeout):
"""
Implementation of the recv method for platforms where select() is not available for pipes.
        We poll on all of the readers with a very small timeout. We stop when the specified timeout
        has elapsed, but we may exceed it since we check all readers during each sweep.
"""
start = time.time()
while True:
for i, r in enumerate(self._readers):
with self._rlocks[i]:
if r.poll(0.000000001):
try:
yield r.recv()
except EOFError:
continue
if time.time() - start > timeout:
break
def close(self):
for ch in self.channels:
try:
ch.close()
except Exception:
pass
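# Rough picture of how these channel classes are wired together (a sketch, not part
# of the original module): CopyTask below creates one ReceivingChannels and one
# SendingChannels group, each backed by one OneWayPipe per child process. The parent
# sends work through SendingChannel (a feeder thread drains an internal Queue into
# the pipe) and collects results through ReceivingChannels.recv(), which uses
# select() on Linux and falls back to polling elsewhere. Each child re-wraps its two
# raw pipes after forking (see ChildProcess.on_fork and FeedingProcess.on_fork).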
class CopyTask(object):
"""
A base class for ImportTask and ExportTask
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, direction):
self.shell = shell
self.ks = ks
self.table = table
self.table_meta = self.shell.get_table_meta(self.ks, self.table)
self.host = shell.conn.get_control_connection_host()
self.fname = safe_normpath(fname)
self.protocol_version = protocol_version
self.config_file = config_file
# if cqlsh is invoked with --debug then set the global debug flag to True
if shell.debug:
global DEBUG
DEBUG = True
# do not display messages when exporting to STDOUT unless --debug is set
self.printmsg = printmsg if self.fname is not None or direction == 'from' or DEBUG \
else swallowmsg
self.options = self.parse_options(opts, direction)
self.num_processes = self.options.copy['numprocesses']
self.encoding = self.options.copy['encoding']
self.printmsg('Using %d child processes' % (self.num_processes,))
if direction == 'from':
self.num_processes += 1 # add the feeder process
self.processes = []
self.inmsg = ReceivingChannels(self.num_processes)
self.outmsg = SendingChannels(self.num_processes)
self.columns = CopyTask.get_columns(shell, ks, table, columns)
self.time_start = time.time()
def maybe_read_config_file(self, opts, direction):
"""
Read optional sections from a configuration file that was specified in the command options or from the default
cqlshrc configuration file if none was specified.
"""
config_file = opts.pop('configfile', '')
if not config_file:
config_file = self.config_file
if not os.path.isfile(config_file):
return opts
configs = configparser.RawConfigParser()
configs.readfp(open(config_file))
ret = dict()
config_sections = list(['copy', 'copy-%s' % (direction,),
'copy:%s.%s' % (self.ks, self.table),
'copy-%s:%s.%s' % (direction, self.ks, self.table)])
for section in config_sections:
if configs.has_section(section):
options = dict(configs.items(section))
self.printmsg("Reading options from %s:[%s]: %s" % (config_file, section, options))
ret.update(options)
# Update this last so the command line options take precedence over the configuration file options
if opts:
self.printmsg("Reading options from the command line: %s" % (opts,))
ret.update(opts)
if self.shell.debug: # this is important for testing, do not remove
self.printmsg("Using options: '%s'" % (ret,))
return ret
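    # Example cqlshrc content picked up by maybe_read_config_file for a
    # "COPY ks.table FROM ..." command (a sketch; the option values are arbitrary).
    # Later sections override earlier ones, and command line options win over all:
    #
    #   [copy]
    #   chunksize = 1000
    #   [copy-from:ks.table]
    #   maxparseerrors = 10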
@staticmethod
def clean_options(opts):
"""
Convert all option values to valid string literals unless they are path names
"""
return dict([(k, v if k not in ['errfile', 'ratefile'] else v)
for k, v, in opts.items()])
def parse_options(self, opts, direction):
"""
Parse options for import (COPY FROM) and export (COPY TO) operations.
Extract from opts csv and dialect options.
:return: 3 dictionaries: the csv options, the dialect options, any unrecognized options.
"""
shell = self.shell
opts = self.clean_options(self.maybe_read_config_file(opts, direction))
dialect_options = dict()
dialect_options['quotechar'] = ensure_str(opts.pop('quote', '"'))
dialect_options['escapechar'] = ensure_str(opts.pop('escape', '\\'))
dialect_options['delimiter'] = ensure_str(opts.pop('delimiter', ','))
if dialect_options['quotechar'] == dialect_options['escapechar']:
dialect_options['doublequote'] = True
del dialect_options['escapechar']
else:
dialect_options['doublequote'] = False
copy_options = dict()
copy_options['nullval'] = ensure_str(opts.pop('null', ''))
copy_options['header'] = bool(opts.pop('header', '').lower() == 'true')
copy_options['encoding'] = opts.pop('encoding', 'utf8')
copy_options['maxrequests'] = int(opts.pop('maxrequests', 6))
copy_options['pagesize'] = int(opts.pop('pagesize', 1000))
# by default the page timeout is 10 seconds per 1000 entries
# in the page size or 10 seconds if pagesize is smaller
copy_options['pagetimeout'] = int(opts.pop('pagetimeout', max(10, 10 * (copy_options['pagesize'] / 1000))))
copy_options['maxattempts'] = int(opts.pop('maxattempts', 5))
copy_options['dtformats'] = DateTimeFormat(opts.pop('datetimeformat', shell.display_timestamp_format),
shell.display_date_format, shell.display_nanotime_format,
milliseconds_only=True)
copy_options['floatprecision'] = int(opts.pop('floatprecision', '5'))
copy_options['doubleprecision'] = int(opts.pop('doubleprecision', '12'))
copy_options['chunksize'] = int(opts.pop('chunksize', 5000))
copy_options['ingestrate'] = int(opts.pop('ingestrate', 100000))
copy_options['maxbatchsize'] = int(opts.pop('maxbatchsize', 20))
copy_options['minbatchsize'] = int(opts.pop('minbatchsize', 10))
copy_options['reportfrequency'] = float(opts.pop('reportfrequency', 0.25))
copy_options['consistencylevel'] = shell.consistency_level
copy_options['decimalsep'] = opts.pop('decimalsep', '.')
copy_options['thousandssep'] = opts.pop('thousandssep', '')
copy_options['boolstyle'] = [ensure_str(s.strip()) for s in opts.pop('boolstyle', 'True, False').split(',')]
copy_options['numprocesses'] = int(opts.pop('numprocesses', self.get_num_processes(16)))
copy_options['begintoken'] = opts.pop('begintoken', '')
copy_options['endtoken'] = opts.pop('endtoken', '')
copy_options['maxrows'] = int(opts.pop('maxrows', '-1'))
copy_options['skiprows'] = int(opts.pop('skiprows', '0'))
copy_options['skipcols'] = opts.pop('skipcols', '')
copy_options['maxparseerrors'] = int(opts.pop('maxparseerrors', '-1'))
copy_options['maxinserterrors'] = int(opts.pop('maxinserterrors', '1000'))
copy_options['errfile'] = safe_normpath(opts.pop('errfile', 'import_%s_%s.err' % (self.ks, self.table,)))
copy_options['ratefile'] = safe_normpath(opts.pop('ratefile', ''))
copy_options['maxoutputsize'] = int(opts.pop('maxoutputsize', '-1'))
copy_options['preparedstatements'] = bool(opts.pop('preparedstatements', 'true').lower() == 'true')
copy_options['ttl'] = int(opts.pop('ttl', -1))
# Hidden properties, they do not appear in the documentation but can be set in config files
# or on the cmd line but w/o completion
copy_options['maxinflightmessages'] = int(opts.pop('maxinflightmessages', '512'))
copy_options['maxbackoffattempts'] = int(opts.pop('maxbackoffattempts', '12'))
copy_options['maxpendingchunks'] = int(opts.pop('maxpendingchunks', '24'))
# set requesttimeout to a value high enough so that maxbatchsize rows will never timeout if the server
# responds: here we set it to 1 sec per 10 rows but no less than 60 seconds
copy_options['requesttimeout'] = int(opts.pop('requesttimeout', max(60, 1 * copy_options['maxbatchsize'] / 10)))
# set childtimeout higher than requesttimeout so that child processes have a chance to report request timeouts
copy_options['childtimeout'] = int(opts.pop('childtimeout', copy_options['requesttimeout'] + 30))
self.check_options(copy_options)
return CopyOptions(copy=copy_options, dialect=dialect_options, unrecognized=opts)
@staticmethod
def check_options(copy_options):
"""
Check any options that require a sanity check beyond a simple type conversion and if required
raise a value error:
- boolean styles must be exactly 2, they must be different and they cannot be empty
"""
bool_styles = copy_options['boolstyle']
if len(bool_styles) != 2 or bool_styles[0] == bool_styles[1] or not bool_styles[0] or not bool_styles[1]:
raise ValueError("Invalid boolean styles %s" % copy_options['boolstyle'])
@staticmethod
def get_num_processes(cap):
"""
Pick a reasonable number of child processes. We need to leave at
least one core for the parent or feeder process.
"""
return max(1, min(cap, CopyTask.get_num_cores() - 1))
@staticmethod
def get_num_cores():
"""
Return the number of cores if available. If the test environment variable
is set, then return the number carried by this variable. This is to test single-core
        machines more easily.
"""
try:
num_cores_for_testing = os.environ.get('CQLSH_COPY_TEST_NUM_CORES', '')
ret = int(num_cores_for_testing) if num_cores_for_testing else mp.cpu_count()
printdebugmsg("Detected %d core(s)" % (ret,))
return ret
except NotImplementedError:
printdebugmsg("Failed to detect number of cores, returning 1")
return 1
@staticmethod
def describe_interval(seconds):
desc = []
for length, unit in ((86400, 'day'), (3600, 'hour'), (60, 'minute')):
            num = int(seconds) // length  # integer division: count whole units only
if num > 0:
desc.append('%d %s' % (num, unit))
if num > 1:
desc[-1] += 's'
seconds %= length
words = '%.03f seconds' % seconds
if len(desc) > 1:
words = ', '.join(desc) + ', and ' + words
elif len(desc) == 1:
words = desc[0] + ' and ' + words
return words
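    # For example, describe_interval(3725.5) returns
    # '1 hour, 2 minutes, and 5.500 seconds'.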
@staticmethod
def get_columns(shell, ks, table, columns):
"""
Return all columns if none were specified or only the columns specified.
Possible enhancement: introduce a regex like syntax (^) to allow users
to specify all columns except a few.
"""
return shell.get_column_names(ks, table) if not columns else columns
def close(self):
self.stop_processes()
self.inmsg.close()
self.outmsg.close()
def num_live_processes(self):
return sum(1 for p in self.processes if p.is_alive())
@staticmethod
def get_pid():
return os.getpid() if hasattr(os, 'getpid') else None
@staticmethod
def trace_process(pid):
if pid and STRACE_ON:
os.system("strace -vvvv -c -o strace.{pid}.out -e trace=all -p {pid}&".format(pid=pid))
def start_processes(self):
for i, process in enumerate(self.processes):
process.start()
self.trace_process(process.pid)
self.trace_process(self.get_pid())
def stop_processes(self):
for process in self.processes:
process.terminate()
def make_params(self):
"""
Return a dictionary of parameters to be used by the worker processes.
On platforms using 'spawn' as the default multiprocessing start method,
this dictionary must be picklable.
"""
shell = self.shell
return dict(ks=self.ks,
table=self.table,
local_dc=self.host.datacenter,
columns=self.columns,
options=self.options,
connect_timeout=shell.conn.connect_timeout,
hostname=self.host.address,
port=shell.port,
ssl=shell.ssl,
auth_provider=shell.auth_provider,
cql_version=shell.conn.cql_version,
config_file=self.config_file,
protocol_version=self.protocol_version,
debug=shell.debug,
coverage=shell.coverage,
coveragerc_path=shell.coveragerc_path
)
def validate_columns(self):
shell = self.shell
if not self.columns:
shell.printerr("No column specified")
return False
for c in self.columns:
if c not in self.table_meta.columns:
shell.printerr('Invalid column name %s' % (c,))
return False
return True
def update_params(self, params, i):
"""
Add the communication pipes to the parameters to be passed to the worker process:
inpipe is the message pipe flowing from parent to child process, so outpipe from the parent point
of view and, vice-versa, outpipe is the message pipe flowing from child to parent, so inpipe
from the parent point of view, hence the two are swapped below.
"""
params['inpipe'] = self.outmsg.pipes[i]
params['outpipe'] = self.inmsg.pipes[i]
return params
class ExportWriter(object):
"""
A class that writes to one or more csv files, or STDOUT
"""
def __init__(self, fname, shell, columns, options):
self.fname = fname
self.shell = shell
self.columns = columns
self.options = options
self.header = options.copy['header']
self.max_output_size = int(options.copy['maxoutputsize'])
self.current_dest = None
self.num_files = 0
if self.max_output_size > 0:
if fname is not None:
self.write = self._write_with_split
self.num_written = 0
else:
shell.printerr("WARNING: maxoutputsize {} ignored when writing to STDOUT".format(self.max_output_size))
self.write = self._write_without_split
else:
self.write = self._write_without_split
def open(self):
self.current_dest = self._get_dest(self.fname)
if self.current_dest is None:
return False
if self.header:
writer = csv.writer(self.current_dest.output, **self.options.dialect)
writer.writerow(self.columns)
return True
def close(self):
self._close_current_dest()
def _next_dest(self):
self._close_current_dest()
self.current_dest = self._get_dest(self.fname + '.%d' % (self.num_files,))
def _get_dest(self, source_name):
"""
Open the output file if any or else use stdout. Return a namedtuple
        containing the output and a boolean indicating if the output should be closed.
"""
CsvDest = namedtuple('CsvDest', 'output close')
if self.fname is None:
return CsvDest(output=sys.stdout, close=False)
else:
try:
ret = CsvDest(output=open(source_name, 'w'), close=True)
self.num_files += 1
return ret
except IOError as e:
self.shell.printerr("Can't open %r for writing: %s" % (source_name, e))
return None
def _close_current_dest(self):
if self.current_dest and self.current_dest.close:
self.current_dest.output.close()
self.current_dest = None
def _write_without_split(self, data, _):
"""
Write the data to the current destination output.
"""
self.current_dest.output.write(data)
def _write_with_split(self, data, num):
"""
Write the data to the current destination output if we still
haven't reached the maximum number of rows. Otherwise split
the rows between the current destination and the next.
"""
if (self.num_written + num) > self.max_output_size:
num_remaining = self.max_output_size - self.num_written
last_switch = 0
for i, row in enumerate([_f for _f in data.split(os.linesep) if _f]):
if i == num_remaining:
self._next_dest()
last_switch = i
num_remaining += self.max_output_size
self.current_dest.output.write(row + '\n')
self.num_written = num - last_switch
else:
self.num_written += num
self.current_dest.output.write(data)
class ExportTask(CopyTask):
"""
A class that exports data to .csv by instantiating one or more processes that work in parallel (ExportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'to')
options = self.options
self.begin_token = int(options.copy['begintoken']) if options.copy['begintoken'] else None
self.end_token = int(options.copy['endtoken']) if options.copy['endtoken'] else None
self.writer = ExportWriter(fname, shell, columns, options)
def run(self):
"""
Initiates the export by starting the worker processes.
Then hand over control to export_records.
"""
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY TO options: %s' % ', '.join(list(self.options.unrecognized.keys())))
return
if not self.validate_columns():
return 0
ranges = self.get_ranges()
if not ranges:
return 0
if not self.writer.open():
return 0
columns = "[" + ", ".join(self.columns) + "]"
self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
params = self.make_params()
for i in range(self.num_processes):
self.processes.append(ExportProcess(self.update_params(params, i)))
self.start_processes()
try:
self.export_records(ranges)
finally:
self.close()
def close(self):
CopyTask.close(self)
self.writer.close()
def get_ranges(self):
"""
return a queue of tuples, where the first tuple entry is a token range (from, to]
and the second entry is a list of hosts that own that range. Each host is responsible
for all the tokens in the range (from, to].
The ring information comes from the driver metadata token map, which is built by
querying System.PEERS.
We only consider replicas that are in the local datacenter. If there are no local replicas
we use the cqlsh session host.
"""
shell = self.shell
hostname = self.host.address
local_dc = self.host.datacenter
ranges = dict()
min_token = self.get_min_token()
begin_token = self.begin_token
end_token = self.end_token
def make_range(prev, curr):
"""
Return the intersection of (prev, curr) and (begin_token, end_token),
return None if the intersection is empty
"""
ret = (prev, curr)
if begin_token:
if curr < begin_token:
return None
elif (prev is None) or (prev < begin_token):
ret = (begin_token, curr)
if end_token:
if (ret[0] is not None) and (ret[0] > end_token):
return None
elif (curr is not None) and (curr > end_token):
ret = (ret[0], end_token)
return ret
def make_range_data(replicas=None):
hosts = []
if replicas:
for r in replicas:
if r.is_up is not False and r.datacenter == local_dc:
hosts.append(r.address)
if not hosts:
hosts.append(hostname) # fallback to default host if no replicas in current dc
return {'hosts': tuple(hosts), 'attempts': 0, 'rows': 0, 'workerno': -1}
if begin_token and begin_token < min_token:
shell.printerr('Begin token %d must be bigger or equal to min token %d' % (begin_token, min_token))
return ranges
if begin_token and end_token and begin_token > end_token:
shell.printerr('Begin token %d must be smaller than end token %d' % (begin_token, end_token))
return ranges
if shell.conn.metadata.token_map is None or min_token is None:
ranges[(begin_token, end_token)] = make_range_data()
return ranges
ring = list(shell.get_ring(self.ks).items())
ring.sort()
if not ring:
# If the ring is empty we get the entire ring from the host we are currently connected to
ranges[(begin_token, end_token)] = make_range_data()
elif len(ring) == 1:
# If there is only one token we get the entire ring from the replicas for that token
ranges[(begin_token, end_token)] = make_range_data(ring[0][1])
else:
# else we loop on the ring
first_range_data = None
previous = None
for token, replicas in ring:
if not first_range_data:
first_range_data = make_range_data(replicas) # we use it at the end when wrapping around
if token.value == min_token:
continue # avoids looping entire ring
current_range = make_range(previous, token.value)
if not current_range:
continue
ranges[current_range] = make_range_data(replicas)
previous = token.value
# For the last ring interval we query the same replicas that hold the first token in the ring
if previous is not None and (not end_token or previous < end_token):
ranges[(previous, end_token)] = first_range_data
elif previous is None and (not end_token or previous < end_token):
previous = begin_token if begin_token else min_token
ranges[(previous, end_token)] = first_range_data
if not ranges:
shell.printerr('Found no ranges to query, check begin and end tokens: %s - %s' % (begin_token, end_token))
return ranges
def get_min_token(self):
"""
:return the minimum token, which depends on the partitioner.
For partitioners that do not support tokens we return None, in
        these cases we will not work in parallel, we'll just send all requests
to the cqlsh session host.
"""
partitioner = self.shell.conn.metadata.partitioner
if partitioner.endswith('RandomPartitioner'):
return -1
elif partitioner.endswith('Murmur3Partitioner'):
return -(2 ** 63) # Long.MIN_VALUE in Java
else:
return None
def send_work(self, ranges, tokens_to_send):
prev_worker_no = ranges[tokens_to_send[0]]['workerno']
i = prev_worker_no + 1 if -1 <= prev_worker_no < (self.num_processes - 1) else 0
for token_range in tokens_to_send:
ranges[token_range]['workerno'] = i
self.outmsg.channels[i].send((token_range, ranges[token_range]))
ranges[token_range]['attempts'] += 1
i = i + 1 if i < self.num_processes - 1 else 0
def export_records(self, ranges):
"""
Send records to child processes and monitor them by collecting their results
or any errors. We terminate when we have processed all the ranges or when one child
process has died (since in this case we will never get any ACK for the ranges
processed by it and at the moment we don't keep track of which ranges a
process is handling).
"""
shell = self.shell
processes = self.processes
meter = RateMeter(log_fcn=self.printmsg,
update_interval=self.options.copy['reportfrequency'],
log_file=self.options.copy['ratefile'])
total_requests = len(ranges)
max_attempts = self.options.copy['maxattempts']
self.send_work(ranges, list(ranges.keys()))
num_processes = len(processes)
succeeded = 0
failed = 0
while (failed + succeeded) < total_requests and self.num_live_processes() == num_processes:
for token_range, result in self.inmsg.recv(timeout=0.1):
if token_range is None and result is None: # a request has finished
succeeded += 1
elif isinstance(result, Exception): # an error occurred
# This token_range failed, retry up to max_attempts if no rows received yet,
# If rows were already received we'd risk duplicating data.
# Note that there is still a slight risk of duplicating data, even if we have
# an error with no rows received yet, it's just less likely. To avoid retrying on
# all timeouts would however mean we could risk not exporting some rows.
if ranges[token_range]['attempts'] < max_attempts and ranges[token_range]['rows'] == 0:
shell.printerr('Error for %s: %s (will try again later attempt %d of %d)'
% (token_range, result, ranges[token_range]['attempts'], max_attempts))
self.send_work(ranges, [token_range])
else:
shell.printerr('Error for %s: %s (permanently given up after %d rows and %d attempts)'
% (token_range, result, ranges[token_range]['rows'],
ranges[token_range]['attempts']))
failed += 1
else: # partial result received
data, num = result
self.writer.write(data, num)
meter.increment(n=num)
ranges[token_range]['rows'] += num
if self.num_live_processes() < len(processes):
for process in processes:
if not process.is_alive():
shell.printerr('Child process %d died with exit code %d' % (process.pid, process.exitcode))
if succeeded < total_requests:
shell.printerr('Exported %d ranges out of %d total ranges, some records might be missing'
% (succeeded, total_requests))
self.printmsg("\n%d rows exported to %d files in %s." %
(meter.get_total_records(),
self.writer.num_files,
self.describe_interval(time.time() - self.time_start)))
class FilesReader(object):
"""
A wrapper around a csv reader to keep track of when we have
exhausted reading input files. We are passed a comma separated
list of paths, where each path is a valid glob expression.
We generate a source generator and we read each source one
by one.
"""
def __init__(self, fname, options):
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.fname = fname
self.sources = None # must be created later due to pickle problems on Windows
self.num_sources = 0
self.current_source = None
self.num_read = 0
def get_source(self, paths):
"""
Return a source generator. Each source is a named tuple
wrapping the source input, file name and a boolean indicating
if it requires closing.
"""
def make_source(fname):
try:
return open(fname, 'r')
except IOError as e:
raise IOError("Can't open %r for reading: %s" % (fname, e))
for path in paths.split(','):
path = path.strip()
if os.path.isfile(path):
yield make_source(path)
else:
result = glob.glob(path)
if len(result) == 0:
raise IOError("Can't open %r for reading: no matching file found" % (path,))
for f in result:
yield (make_source(f))
def start(self):
self.sources = self.get_source(self.fname)
self.next_source()
@property
def exhausted(self):
return not self.current_source
def next_source(self):
"""
Close the current source, if any, and open the next one. Return true
if there is another source, false otherwise.
"""
self.close_current_source()
while self.current_source is None:
try:
self.current_source = next(self.sources)
if self.current_source:
self.num_sources += 1
except StopIteration:
return False
if self.header:
next(self.current_source)
return True
def close_current_source(self):
if not self.current_source:
return
self.current_source.close()
self.current_source = None
def close(self):
self.close_current_source()
def read_rows(self, max_rows):
if not self.current_source:
return []
rows = []
for i in range(min(max_rows, self.chunk_size)):
try:
row = next(self.current_source)
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.next_source()
break
if self.num_read > self.skip_rows:
rows.append(row)
except StopIteration:
self.next_source()
break
return [_f for _f in rows if _f]
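# Usage sketch (an illustration; the file names are hypothetical and `options`
# comes from CopyTask.parse_options). fname is a comma separated list of glob
# expressions:
#
#   reader = FilesReader('data1.csv, exports/part-*.csv', options)
#   reader.start()
#   while not reader.exhausted:
#       rows = reader.read_rows(5000)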
class PipeReader(object):
"""
    A class for reading rows received on a pipe; this is used for reading input from STDIN
"""
def __init__(self, inpipe, options):
self.inpipe = inpipe
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.num_read = 0
self.exhausted = False
self.num_sources = 1
def start(self):
pass
def read_rows(self, max_rows):
rows = []
for i in range(min(max_rows, self.chunk_size)):
row = self.inpipe.recv()
if row is None:
self.exhausted = True
break
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.exhausted = True
break # max rows exceeded
if self.header or self.num_read < self.skip_rows:
self.header = False # skip header or initial skip_rows rows
continue
rows.append(row)
return rows
class ImportProcessResult(object):
"""
An object sent from ImportProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, imported=0):
self.imported = imported
class FeedingProcessResult(object):
"""
An object sent from FeedingProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, sent, reader):
self.sent = sent
self.num_sources = reader.num_sources
self.skip_rows = reader.skip_rows
class ImportTaskError(object):
"""
An object sent from child processes (feeder or workers) to the parent import task to indicate an error.
"""
def __init__(self, name, msg, rows=None, attempts=1, final=True):
self.name = name
self.msg = msg
self.rows = rows if rows else []
self.attempts = attempts
self.final = final
def is_parse_error(self):
"""
We treat read and parse errors as unrecoverable and we have different global counters for giving up when
a maximum has been reached. We consider value and type errors as parse errors as well since they
are typically non recoverable.
"""
name = self.name
return name.startswith('ValueError') or name.startswith('TypeError') or \
name.startswith('ParseError') or name.startswith('IndexError') or name.startswith('ReadError')
class ImportErrorHandler(object):
"""
A class for managing import errors
"""
def __init__(self, task):
self.shell = task.shell
self.options = task.options
self.max_attempts = self.options.copy['maxattempts']
self.max_parse_errors = self.options.copy['maxparseerrors']
self.max_insert_errors = self.options.copy['maxinserterrors']
self.err_file = self.options.copy['errfile']
self.parse_errors = 0
self.insert_errors = 0
self.num_rows_failed = 0
if os.path.isfile(self.err_file):
now = datetime.datetime.now()
old_err_file = self.err_file + now.strftime('.%Y%m%d_%H%M%S')
printdebugmsg("Renaming existing %s to %s\n" % (self.err_file, old_err_file))
os.rename(self.err_file, old_err_file)
def max_exceeded(self):
if self.insert_errors > self.max_insert_errors >= 0:
self.shell.printerr("Exceeded maximum number of insert errors %d" % self.max_insert_errors)
return True
if self.parse_errors > self.max_parse_errors >= 0:
self.shell.printerr("Exceeded maximum number of parse errors %d" % self.max_parse_errors)
return True
return False
def add_failed_rows(self, rows):
self.num_rows_failed += len(rows)
with open(self.err_file, "a") as f:
writer = csv.writer(f, **self.options.dialect)
for row in rows:
writer.writerow(row)
def handle_error(self, err):
"""
Handle an error by printing the appropriate error message and incrementing the correct counter.
"""
shell = self.shell
if err.is_parse_error():
self.parse_errors += len(err.rows)
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up without retries"
% (len(err.rows), err.name, err.msg))
else:
if not err.final:
shell.printerr("Failed to import %d rows: %s - %s, will retry later, attempt %d of %d"
% (len(err.rows), err.name, err.msg, err.attempts, self.max_attempts))
else:
self.insert_errors += len(err.rows)
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up after %d attempts"
% (len(err.rows), err.name, err.msg, err.attempts))
class ImportTask(CopyTask):
"""
A class to import data from .csv by instantiating one or more processes
that work in parallel (ImportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'from')
options = self.options
self.skip_columns = [c.strip() for c in self.options.copy['skipcols'].split(',')]
self.valid_columns = [c for c in self.columns if c not in self.skip_columns]
self.receive_meter = RateMeter(log_fcn=self.printmsg,
update_interval=options.copy['reportfrequency'],
log_file=options.copy['ratefile'])
self.error_handler = ImportErrorHandler(self)
self.feeding_result = None
self.sent = 0
def make_params(self):
ret = CopyTask.make_params(self)
ret['skip_columns'] = self.skip_columns
ret['valid_columns'] = self.valid_columns
return ret
def validate_columns(self):
if not CopyTask.validate_columns(self):
return False
shell = self.shell
if not self.valid_columns:
shell.printerr("No valid column specified")
return False
for c in self.table_meta.primary_key:
if c.name not in self.valid_columns:
shell.printerr("Primary key column '%s' missing or skipped" % (c.name,))
return False
return True
def run(self):
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY FROM options: %s' % ', '.join(list(self.options.unrecognized.keys())))
return
if not self.validate_columns():
return 0
columns = "[" + ", ".join(self.valid_columns) + "]"
self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
try:
params = self.make_params()
for i in range(self.num_processes - 1):
self.processes.append(ImportProcess(self.update_params(params, i)))
feeder = FeedingProcess(self.outmsg.pipes[-1], self.inmsg.pipes[-1],
self.outmsg.pipes[:-1], self.fname, self.options)
self.processes.append(feeder)
self.start_processes()
pr = profile_on() if PROFILE_ON else None
self.import_records()
if pr:
profile_off(pr, file_name='parent_profile_%d.txt' % (os.getpid(),))
except Exception as exc:
shell.printerr(str(exc))
if shell.debug:
traceback.print_exc()
return 0
finally:
self.close()
def send_stdin_rows(self):
"""
We need to pass stdin rows to the feeder process as it is not safe to pickle or share stdin
directly (in case of file the child process would close it). This is a very primitive support
for STDIN import in that we we won't start reporting progress until STDIN is fully consumed. I
think this is reasonable.
"""
shell = self.shell
self.printmsg("[Use . on a line by itself to end input]")
for row in shell.use_stdin_reader(prompt='[copy] ', until=r'.'):
self.outmsg.channels[-1].send(row)
self.outmsg.channels[-1].send(None)
if shell.tty:
print()
def import_records(self):
"""
Keep on running until we have stuff to receive or send and until all processes are running.
Send data (batches or retries) up to the max ingest rate. If we are waiting for stuff to
receive check the incoming queue.
"""
if not self.fname:
self.send_stdin_rows()
child_timeout = self.options.copy['childtimeout']
last_recv_num_records = 0
last_recv_time = time.time()
while self.feeding_result is None or self.receive_meter.total_records < self.feeding_result.sent:
self.receive_results()
if self.feeding_result is not None:
if self.receive_meter.total_records != last_recv_num_records:
last_recv_num_records = self.receive_meter.total_records
last_recv_time = time.time()
elif (time.time() - last_recv_time) > child_timeout:
self.shell.printerr("No records inserted in {} seconds, aborting".format(child_timeout))
break
if self.error_handler.max_exceeded() or not self.all_processes_running():
break
if self.error_handler.num_rows_failed:
self.shell.printerr("Failed to process %d rows; failed rows written to %s" %
(self.error_handler.num_rows_failed,
self.error_handler.err_file))
if not self.all_processes_running():
self.shell.printerr("{} child process(es) died unexpectedly, aborting"
.format(self.num_processes - self.num_live_processes()))
else:
if self.error_handler.max_exceeded():
self.processes[-1].terminate() # kill the feeder
for i, _ in enumerate(self.processes):
if self.processes[i].is_alive():
self.outmsg.channels[i].send(None)
# allow time for worker processes to exit cleanly
attempts = 50 # 100 milliseconds per attempt, so 5 seconds total
while attempts > 0 and self.num_live_processes() > 0:
time.sleep(0.1)
attempts -= 1
self.printmsg("\n%d rows imported from %d files in %s (%d skipped)." %
(self.receive_meter.get_total_records() - self.error_handler.num_rows_failed,
self.feeding_result.num_sources if self.feeding_result else 0,
self.describe_interval(time.time() - self.time_start),
self.feeding_result.skip_rows if self.feeding_result else 0))
def all_processes_running(self):
return self.num_live_processes() == len(self.processes)
def receive_results(self):
"""
Receive results from the worker processes, which will send the number of rows imported
or from the feeder process, which will send the number of rows sent when it has finished sending rows.
"""
aggregate_result = ImportProcessResult()
try:
for result in self.inmsg.recv(timeout=0.1):
if isinstance(result, ImportProcessResult):
aggregate_result.imported += result.imported
elif isinstance(result, ImportTaskError):
self.error_handler.handle_error(result)
elif isinstance(result, FeedingProcessResult):
self.feeding_result = result
else:
raise ValueError("Unexpected result: %s" % (result,))
finally:
self.receive_meter.increment(aggregate_result.imported)
class FeedingProcess(mp.Process):
"""
A process that reads from import sources and sends chunks to worker processes.
"""
def __init__(self, inpipe, outpipe, worker_pipes, fname, options):
super(FeedingProcess, self).__init__(target=self.run)
self.inpipe = inpipe
self.outpipe = outpipe
self.worker_pipes = worker_pipes
self.inmsg = None # must be created after forking on Windows
self.outmsg = None # must be created after forking on Windows
self.worker_channels = None # must be created after forking on Windows
self.reader = FilesReader(fname, options) if fname else PipeReader(inpipe, options)
self.send_meter = RateMeter(log_fcn=None, update_interval=1)
self.ingest_rate = options.copy['ingestrate']
self.num_worker_processes = options.copy['numprocesses']
self.max_pending_chunks = options.copy['maxpendingchunks']
self.chunk_id = 0
def on_fork(self):
"""
Create the channels and release any parent connections after forking,
see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
self.worker_channels = [SendingChannel(p) for p in self.worker_pipes]
def run(self):
pr = profile_on() if PROFILE_ON else None
self.inner_run()
if pr:
profile_off(pr, file_name='feeder_profile_%d.txt' % (os.getpid(),))
def inner_run(self):
"""
Send one batch per worker process to the queue unless we have exceeded the ingest rate.
In the export case we queue everything and let the worker processes throttle using max_requests,
here we throttle using the ingest rate in the feeding process because of memory usage concerns.
When finished we send back to the parent process the total number of rows sent.
"""
self.on_fork()
reader = self.reader
try:
reader.start()
except IOError as exc:
self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
channels = self.worker_channels
max_pending_chunks = self.max_pending_chunks
sent = 0
failed_attempts = 0
while not reader.exhausted:
channels_eligible = [c for c in channels if c.num_pending() < max_pending_chunks]
if not channels_eligible:
failed_attempts += 1
delay = randint(1, pow(2, failed_attempts))
printdebugmsg("All workers busy, sleeping for %d second(s)" % (delay,))
time.sleep(delay)
continue
elif failed_attempts > 0:
failed_attempts = 0
for ch in channels_eligible:
try:
max_rows = self.ingest_rate - self.send_meter.current_record
if max_rows <= 0:
self.send_meter.maybe_update(sleep=False)
continue
rows = reader.read_rows(max_rows)
if rows:
sent += self.send_chunk(ch, rows)
except Exception as exc:
self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
if reader.exhausted:
break
# send back to the parent process the number of rows sent to the worker processes
self.outmsg.send(FeedingProcessResult(sent, reader))
# wait for poison pill (None)
self.inmsg.recv()
def send_chunk(self, ch, rows):
self.chunk_id += 1
num_rows = len(rows)
self.send_meter.increment(num_rows)
ch.send({'id': self.chunk_id, 'rows': rows, 'imported': 0, 'num_rows_sent': num_rows})
return num_rows
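    # Each chunk handed to a worker by send_chunk() above is a plain dict, e.g.
    # {'id': 42, 'rows': [...], 'imported': 0, 'num_rows_sent': len(rows)},
    # where the id is just a monotonically increasing counter per feeder.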
def close(self):
self.reader.close()
self.inmsg.close()
self.outmsg.close()
for ch in self.worker_channels:
ch.close()
class ChildProcess(mp.Process):
"""
    A child worker process; this holds the functionality common to ImportProcess and ExportProcess.
"""
def __init__(self, params, target):
super(ChildProcess, self).__init__(target=target)
self.inpipe = params['inpipe']
self.outpipe = params['outpipe']
self.inmsg = None # must be initialized after fork on Windows
self.outmsg = None # must be initialized after fork on Windows
self.ks = params['ks']
self.table = params['table']
self.local_dc = params['local_dc']
self.columns = params['columns']
self.debug = params['debug']
self.port = params['port']
self.hostname = params['hostname']
self.connect_timeout = params['connect_timeout']
self.cql_version = params['cql_version']
self.auth_provider = params['auth_provider']
self.ssl = params['ssl']
self.protocol_version = params['protocol_version']
self.config_file = params['config_file']
options = params['options']
self.date_time_format = options.copy['dtformats']
self.consistency_level = options.copy['consistencylevel']
self.decimal_sep = options.copy['decimalsep']
self.thousands_sep = options.copy['thousandssep']
self.boolean_styles = options.copy['boolstyle']
self.max_attempts = options.copy['maxattempts']
self.encoding = options.copy['encoding']
# Here we inject some failures for testing purposes, only if this environment variable is set
if os.environ.get('CQLSH_COPY_TEST_FAILURES', ''):
self.test_failures = json.loads(os.environ.get('CQLSH_COPY_TEST_FAILURES', ''))
else:
self.test_failures = None
# attributes for coverage
self.coverage = params['coverage']
self.coveragerc_path = params['coveragerc_path']
self.coverage_collection = None
self.sigterm_handler = None
self.sighup_handler = None
def on_fork(self):
"""
Create the channels and release any parent connections after forking, see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
def close(self):
printdebugmsg("Closing queues...")
self.inmsg.close()
self.outmsg.close()
def start_coverage(self):
import coverage
self.coverage_collection = coverage.Coverage(config_file=self.coveragerc_path)
self.coverage_collection.start()
# save current handlers for SIGTERM and SIGHUP
self.sigterm_handler = signal.getsignal(signal.SIGTERM)
        self.sighup_handler = signal.getsignal(signal.SIGHUP)
        def handle_sigterm(signum, frame):
self.stop_coverage()
self.close()
self.terminate()
# set custom handler for SIGHUP and SIGTERM
# needed to make sure coverage data is saved
signal.signal(signal.SIGTERM, handle_sigterm)
signal.signal(signal.SIGHUP, handle_sigterm)
def stop_coverage(self):
self.coverage_collection.stop()
self.coverage_collection.save()
signal.signal(signal.SIGTERM, self.sigterm_handler)
signal.signal(signal.SIGHUP, self.sighup_handler)
class ExpBackoffRetryPolicy(RetryPolicy):
"""
A retry policy with exponential back-off for read timeouts and write timeouts
"""
def __init__(self, parent_process):
RetryPolicy.__init__(self)
self.max_attempts = parent_process.max_attempts
def on_read_timeout(self, query, consistency, required_responses,
received_responses, data_retrieved, retry_num):
return self._handle_timeout(consistency, retry_num)
def on_write_timeout(self, query, consistency, write_type,
required_responses, received_responses, retry_num):
return self._handle_timeout(consistency, retry_num)
def _handle_timeout(self, consistency, retry_num):
delay = self.backoff(retry_num)
if delay > 0:
printdebugmsg("Timeout received, retrying after %d seconds" % (delay,))
time.sleep(delay)
return self.RETRY, consistency
elif delay == 0:
printdebugmsg("Timeout received, retrying immediately")
return self.RETRY, consistency
else:
printdebugmsg("Timeout received, giving up after %d attempts" % (retry_num + 1))
return self.RETHROW, None
def backoff(self, retry_num):
"""
Perform exponential back-off up to a maximum number of times, where
this maximum is per query.
To back-off we should wait a random number of seconds
between 0 and 2^c - 1, where c is the number of total failures.
:return : the number of seconds to wait for, -1 if we should not retry
"""
if retry_num >= self.max_attempts:
return -1
delay = randint(0, pow(2, retry_num + 1) - 1)
return delay
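    # For example, successive timeouts wait a uniformly random number of seconds in
    # [0, 1], then [0, 3], then [0, 7], ... (i.e. [0, 2**(retry_num + 1) - 1]) until
    # retry_num reaches max_attempts, at which point -1 is returned and the query is
    # rethrown rather than retried.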
class ExportSession(object):
"""
A class for connecting to a cluster and storing the number
of requests that this connection is processing. It wraps the methods
for executing a query asynchronously and for shutting down the
connection to the cluster.
"""
def __init__(self, cluster, export_process):
session = cluster.connect(export_process.ks)
session.row_factory = tuple_factory
session.default_fetch_size = export_process.options.copy['pagesize']
session.default_timeout = export_process.options.copy['pagetimeout']
printdebugmsg("Created connection to %s with page size %d and timeout %d seconds per page"
% (cluster.contact_points, session.default_fetch_size, session.default_timeout))
self.cluster = cluster
self.session = session
self.requests = 1
self.lock = threading.Lock()
self.consistency_level = export_process.consistency_level
def add_request(self):
with self.lock:
self.requests += 1
def complete_request(self):
with self.lock:
self.requests -= 1
def num_requests(self):
with self.lock:
return self.requests
def execute_async(self, query):
return self.session.execute_async(SimpleStatement(query, consistency_level=self.consistency_level))
def shutdown(self):
self.cluster.shutdown()
class ExportProcess(ChildProcess):
"""
    A child worker process for the export task, ExportTask.
"""
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
options = params['options']
self.float_precision = options.copy['floatprecision']
self.double_precision = options.copy['doubleprecision']
self.nullval = options.copy['nullval']
self.max_requests = options.copy['maxrequests']
self.hosts_to_sessions = dict()
self.formatters = dict()
self.options = options
def run(self):
if self.coverage:
self.start_coverage()
try:
self.inner_run()
finally:
if self.coverage:
self.stop_coverage()
self.close()
def inner_run(self):
"""
The parent sends us (range, info) on the inbound queue (inmsg)
in order to request us to process a range, for which we can
select any of the hosts in info, which also contains other information for this
range such as the number of attempts already performed. We can signal errors
on the outbound queue (outmsg) by sending (range, error) or
we can signal a global error by sending (None, error).
We terminate when the inbound queue is closed.
"""
self.on_fork()
while True:
if self.num_requests() > self.max_requests:
time.sleep(0.001) # 1 millisecond
continue
token_range, info = self.inmsg.recv()
self.start_request(token_range, info)
@staticmethod
def get_error_message(err, print_traceback=False):
if isinstance(err, str):
msg = err
elif isinstance(err, BaseException):
msg = "%s - %s" % (err.__class__.__name__, err)
if print_traceback and sys.exc_info()[1] == err:
traceback.print_exc()
else:
msg = str(err)
return msg
def report_error(self, err, token_range):
msg = self.get_error_message(err, print_traceback=self.debug)
printdebugmsg(msg)
self.send((token_range, Exception(msg)))
def send(self, response):
self.outmsg.send(response)
def start_request(self, token_range, info):
"""
Begin querying a range by executing an async query that
will later on invoke the callbacks attached in attach_callbacks.
"""
session = self.get_session(info['hosts'], token_range)
if session:
metadata = session.cluster.metadata.keyspaces[self.ks].tables[self.table]
query = self.prepare_query(metadata.partition_key, token_range, info['attempts'])
future = session.execute_async(query)
self.attach_callbacks(token_range, future, session)
def num_requests(self):
return sum(session.num_requests() for session in list(self.hosts_to_sessions.values()))
def get_session(self, hosts, token_range):
"""
We return a session connected to one of the hosts passed in, which are valid replicas for
the token range. We sort the replicas, favouring those without any active requests yet or with the
smallest number of requests. If we fail to connect, we report an error so that the token range will
be retried later.
:return: An ExportSession connected to the chosen host.
"""
# sorted replicas favouring those with no connections yet
hosts = sorted(hosts,
key=lambda hh: 0 if hh not in self.hosts_to_sessions else self.hosts_to_sessions[hh].requests)
errors = []
ret = None
for host in hosts:
try:
ret = self.connect(host)
except Exception as e:
errors.append(self.get_error_message(e))
if ret:
if errors:
printdebugmsg("Warning: failed to connect to some replicas: %s" % (errors,))
return ret
self.report_error("Failed to connect to all replicas %s for %s, errors: %s" % (hosts, token_range, errors),
token_range)
return None
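# Hedged example of the replica sorting above (hypothetical hosts): if '10.0.0.2' already has an
# ExportSession with 3 outstanding requests and '10.0.0.1' has no session yet, the sorted order is
# ['10.0.0.1', '10.0.0.2'], so the idle replica is attempted first.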
def connect(self, host):
if host in list(self.hosts_to_sessions.keys()):
session = self.hosts_to_sessions[host]
session.add_request()
return session
new_cluster = Cluster(
contact_points=(host,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
ssl_options=ssl_settings(host, self.config_file) if self.ssl else None,
load_balancing_policy=WhiteListRoundRobinPolicy([host]),
default_retry_policy=ExpBackoffRetryPolicy(self),
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0)
session = ExportSession(new_cluster, self)
self.hosts_to_sessions[host] = session
return session
def attach_callbacks(self, token_range, future, session):
metadata = session.cluster.metadata
ks_meta = metadata.keyspaces[self.ks]
table_meta = ks_meta.tables[self.table]
cql_types = [CqlType(table_meta.columns[c].cql_type, ks_meta) for c in self.columns]
def result_callback(rows):
if future.has_more_pages:
future.start_fetching_next_page()
self.write_rows_to_csv(token_range, rows, cql_types)
else:
self.write_rows_to_csv(token_range, rows, cql_types)
self.send((None, None))
session.complete_request()
def err_callback(err):
self.report_error(err, token_range)
session.complete_request()
future.add_callbacks(callback=result_callback, errback=err_callback)
def write_rows_to_csv(self, token_range, rows, cql_types):
if not rows:
return # no rows in this range
try:
output = StringIO() if six.PY3 else BytesIO()
writer = csv.writer(output, **self.options.dialect)
for row in rows:
print("cqlshlib.copyutil.ExportProcess.write_rows_to_csv(): writing row")
writer.writerow(list(map(self.format_value, row, cql_types)))
data = (output.getvalue(), len(rows))
self.send((token_range, data))
output.close()
except Exception as e:
self.report_error(e, token_range)
def format_value(self, val, cqltype):
if val is None or val == EMPTY:
return format_value_default(self.nullval, colormap=NO_COLOR_MAP)
formatter = self.formatters.get(cqltype, None)
if not formatter:
formatter = get_formatter(val, cqltype)
self.formatters[cqltype] = formatter
if not hasattr(cqltype, 'precision'):
cqltype.precision = self.double_precision if cqltype.type_name == 'double' else self.float_precision
formatted = formatter(val, cqltype=cqltype,
encoding=self.encoding, colormap=NO_COLOR_MAP, date_time_format=self.date_time_format,
float_precision=cqltype.precision, nullval=self.nullval, quote=False,
decimal_sep=self.decimal_sep, thousands_sep=self.thousands_sep,
boolean_styles=self.boolean_styles)
return formatted if six.PY3 else formatted.encode('utf8')
def close(self):
ChildProcess.close(self)
for session in list(self.hosts_to_sessions.values()):
session.shutdown()
def prepare_query(self, partition_key, token_range, attempts):
"""
Return the export query or a fake query with some failure injected.
"""
if self.test_failures:
return self.maybe_inject_failures(partition_key, token_range, attempts)
else:
return self.prepare_export_query(partition_key, token_range)
def maybe_inject_failures(self, partition_key, token_range, attempts):
"""
Examine self.test_failures and see if token_range is either a token range
supposed to cause a failure (failing_range) or to terminate the worker process
(exit_range). If not then call prepare_export_query(), which implements the
normal behavior.
"""
start_token, end_token = token_range
if not start_token or not end_token:
# exclude first and last ranges to make things simpler
return self.prepare_export_query(partition_key, token_range)
if 'failing_range' in self.test_failures:
failing_range = self.test_failures['failing_range']
if start_token >= failing_range['start'] and end_token <= failing_range['end']:
if attempts < failing_range['num_failures']:
return 'SELECT * from bad_table'
if 'exit_range' in self.test_failures:
exit_range = self.test_failures['exit_range']
if start_token >= exit_range['start'] and end_token <= exit_range['end']:
sys.exit(1)
return self.prepare_export_query(partition_key, token_range)
def prepare_export_query(self, partition_key, token_range):
"""
Return a query where we select all the data for this token range
"""
pk_cols = ", ".join(protect_names(col.name for col in partition_key))
columnlist = ', '.join(protect_names(self.columns))
start_token, end_token = token_range
query = 'SELECT %s FROM %s.%s' % (columnlist, protect_name(self.ks), protect_name(self.table))
if start_token is not None or end_token is not None:
query += ' WHERE'
if start_token is not None:
query += ' token(%s) > %s' % (pk_cols, start_token)
if start_token is not None and end_token is not None:
query += ' AND'
if end_token is not None:
query += ' token(%s) <= %s' % (pk_cols, end_token)
return query
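# Hedged example of the query built above, with a hypothetical table ks1.t1 whose partition key is
# (a, b) and a token range of (-100, 200):
#   SELECT a, b, c FROM ks1.t1 WHERE token(a, b) > -100 AND token(a, b) <= 200
# For the first and last ranges one of the two bounds is omitted, and with no bounds at all the
# WHERE clause is dropped entirely.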
class ParseError(Exception):
""" We failed to parse an import record """
pass
class ImmutableDict(frozenset):
"""
Immutable dictionary implementation to represent map types.
We need to pass BoundStatement.bind() a dict() because it calls iteritems(),
except we can't create a dict with another dict as the key, hence we use a class
that adds iteritems to a frozenset of tuples (which is how dicts are normally made
immutable in Python).
Must be declared in the top level of the module to be available for pickling.
"""
iteritems = frozenset.__iter__
def items(self):
for k, v in self.iteritems():
yield k, v
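# Hedged usage sketch (not part of the original module): ImmutableDict is built from key/value
# tuples and behaves enough like a dict for the driver's map serializer:
#   m = ImmutableDict([('k1', 1), ('k2', 2)])
#   dict(m.items())  # -> {'k1': 1, 'k2': 2}
# and m itself is hashable, so it can be a member of a set or the key of another map.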
class ImportConversion(object):
"""
A class for converting strings to values when importing from csv, used by ImportProcess,
the parent.
"""
def __init__(self, parent, table_meta, statement=None):
self.ks = parent.ks
self.table = parent.table
self.columns = parent.valid_columns
self.nullval = parent.nullval
self.decimal_sep = parent.decimal_sep
self.thousands_sep = parent.thousands_sep
self.boolean_styles = parent.boolean_styles
self.date_time_format = parent.date_time_format.timestamp_format
self.debug = parent.debug
self.encoding = parent.encoding
self.table_meta = table_meta
self.primary_key_indexes = [self.columns.index(col.name) for col in self.table_meta.primary_key]
self.partition_key_indexes = [self.columns.index(col.name) for col in self.table_meta.partition_key]
if statement is None:
self.use_prepared_statements = False
statement = self._get_primary_key_statement(parent, table_meta)
else:
self.use_prepared_statements = True
self.is_counter = parent.is_counter(table_meta)
self.proto_version = statement.protocol_version
# the cql types and converters for the prepared statement, either the full statement or only the primary keys
self.cqltypes = [c.type for c in statement.column_metadata]
self.converters = [self._get_converter(c.type) for c in statement.column_metadata]
# the cql types for the entire statement, these are the same as the types above but
# only when using prepared statements
self.coltypes = [table_meta.columns[name].cql_type for name in parent.valid_columns]
# these functions are used for non-prepared statements to protect values with quotes if required
self.protectors = [self._get_protector(t) for t in self.coltypes]
@staticmethod
def _get_protector(t):
if t in ('ascii', 'text', 'timestamp', 'date', 'time', 'inet'):
return lambda v: protect_value(v)
else:
return lambda v: v
@staticmethod
def _get_primary_key_statement(parent, table_meta):
"""
We prepare a query statement to find out the types of the partition key columns so we can
route the update query to the correct replicas. As far as I understand, this is the easiest
way to find out the types of the partition columns; we will never actually execute this prepared statement.
"""
where_clause = ' AND '.join(['%s = ?' % (protect_name(c.name)) for c in table_meta.partition_key])
select_query = 'SELECT * FROM %s.%s WHERE %s' % (protect_name(parent.ks),
protect_name(parent.table),
where_clause)
return parent.session.prepare(ensure_str(select_query))
@staticmethod
def unprotect(v):
if v is not None:
return CqlRuleSet.dequote_value(v)
def _get_converter(self, cql_type):
"""
Return a function that converts a string into a value that can be passed
into BoundStatement.bind() for the given cql type. See cassandra.cqltypes
for more details.
"""
unprotect = self.unprotect
def convert(t, v):
v = unprotect(v)
if v == self.nullval:
return self.get_null_val()
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_mandatory(t, v):
v = unprotect(v)
# we can't distinguish between empty strings and null values in csv. Null values are not supported in
# collections, so it must be an empty string.
if v == self.nullval and not issubclass(t, VarcharType):
raise ParseError('Empty values are not allowed')
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_blob(v, **_):
if sys.version_info.major >= 3:
return bytes.fromhex(v[2:])
else:
return BlobType(v[2:].decode("hex"))
def convert_text(v, **_):
return ensure_str(v)
def convert_uuid(v, **_):
return UUID(v)
def convert_bool(v, **_):
return True if v.lower() == ensure_str(self.boolean_styles[0]).lower() else False
def get_convert_integer_fcn(adapter=int):
"""
Return a slow or a fast integer conversion function, depending on whether self.thousands_sep is set
"""
if self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ensure_str('')))
else:
return lambda v, ct=cql_type: adapter(v)
def get_convert_decimal_fcn(adapter=float):
"""
Return a slow or a fast decimal conversion function, depending on whether self.thousands_sep and self.decimal_sep are set
"""
empty_str = ensure_str('')
dot_str = ensure_str('.')
if self.thousands_sep and self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, empty_str).replace(self.decimal_sep, dot_str))
elif self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, empty_str))
elif self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.decimal_sep, dot_str))
else:
return lambda v, ct=cql_type: adapter(v)
def split(val, sep=','):
"""
Split "val" into a list of values whenever the separator "sep" is found, but
ignore separators inside parentheses or single quotes, except for the two
outermost parentheses, which will be ignored. This method is called when parsing composite
types, "val" should be at least 2 characters long, the first char should be an
open parenthesis and the last char should be a matching closing parenthesis. We could also
check exactly which parenthesis type depending on the caller, but I don't want to enforce
too many checks that don't necessarily provide any additional benefits, and risk breaking
data that could previously be imported, even if strictly speaking it is incorrect CQL.
For example, right now we accept sets that start with '[' and ']', I don't want to break this
by enforcing '{' and '}' in a minor release.
"""
def is_open_paren(cc):
return cc == '{' or cc == '[' or cc == '('
def is_close_paren(cc):
return cc == '}' or cc == ']' or cc == ')'
def paren_match(c1, c2):
return (c1 == '{' and c2 == '}') or (c1 == '[' and c2 == ']') or (c1 == '(' and c2 == ')')
if len(val) < 2 or not paren_match(val[0], val[-1]):
raise ParseError('Invalid composite string, it should start and end with matching parentheses: {}'
.format(val))
ret = []
last = 1
level = 0
quote = False
for i, c in enumerate(val):
if c == '\'':
quote = not quote
elif not quote:
if is_open_paren(c):
level += 1
elif is_close_paren(c):
level -= 1
elif c == sep and level == 1:
ret.append(val[last:i])
last = i + 1
else:
if last < len(val) - 1:
ret.append(val[last:-1])
return ret
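# Hedged examples of split() above (hypothetical inputs, not part of the original module):
#   split("[1, 2, [3, 4]]")       -> ['1', ' 2', ' [3, 4]']
# the outermost brackets are stripped and the nested '[3, 4]' stays intact because separators
# are only honoured at nesting level 1; single quotes are respected the same way:
#   split("{'a,b': 1}", sep=':')  -> ["'a,b'", ' 1']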
# this should match all possible CQL and CQLSH datetime formats
p = re.compile(r"(\d{4})\-(\d{2})\-(\d{2})\s?(?:'T')?" # YYYY-MM-DD[( |'T')]
+ r"(?:(\d{2}):(\d{2})(?::(\d{2})(?:\.(\d{1,6}))?))?" # [HH:MM[:SS[.NNNNNN]]]
+ r"(?:([+\-])(\d{2}):?(\d{2}))?") # [(+|-)HH[:]MM]]
def convert_datetime(val, **_):
try:
if six.PY2:
# Python 2 implementation
tval = time.strptime(val, self.date_time_format)
return timegm(tval) * 1e3 # scale seconds to millis for the raw value
else:
# Python 3 implementation
dtval = datetime.datetime.strptime(val, self.date_time_format)
return dtval.timestamp() * 1000
except ValueError:
pass # if it's not in the default format we try CQL formats
m = p.match(val)
if not m:
try:
# in case of overflow COPY TO prints dates as milliseconds from the epoch, see
# deserialize_date_fallback_int in cqlsh.py
return int(val)
except ValueError:
raise ValueError("can't interpret %r as a date with format %s or as int" % (val,
self.date_time_format))
# https://docs.python.org/2/library/time.html#time.struct_time
tval = time.struct_time((int(m.group(1)), int(m.group(2)), int(m.group(3)), # year, month, day
int(m.group(4)) if m.group(4) else 0, # hour
int(m.group(5)) if m.group(5) else 0, # minute
int(m.group(6)) if m.group(6) else 0, # second
0, 1, -1)) # day of week, day of year, dst-flag
# convert sub-seconds (a number between 1 and 6 digits) to milliseconds
milliseconds = 0 if not m.group(7) else int(m.group(7)) * pow(10, 3 - len(m.group(7)))
if m.group(8):
offset = (int(m.group(9)) * 3600 + int(m.group(10)) * 60) * int(m.group(8) + '1')
else:
offset = -time.timezone
# scale seconds to millis for the raw value
return ((timegm(tval) + offset) * 1000) + milliseconds
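# Hedged examples of convert_datetime() above (hypothetical values, assuming the configured
# date_time_format does not already match the input):
#   '2017-01-02 03:04:05.250+0000' -> timegm((2017, 1, 2, 3, 4, 5, ...)) * 1000 + 250 milliseconds
#   '2017-01-02'                   -> midnight of that day (local timezone offset applied), in ms
#   '1483326245000'                -> returned as int(val), the overflow fallback described above
# Values matching neither the configured format nor the regex raise ValueError.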
def convert_date(v, **_):
return Date(v)
def convert_time(v, **_):
return Time(v)
def convert_tuple(val, ct=cql_type):
return tuple(convert_mandatory(t, v) for t, v in zip(ct.subtypes, split(val)))
def convert_list(val, ct=cql_type):
return tuple(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_set(val, ct=cql_type):
return frozenset(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_map(val, ct=cql_type):
"""
See ImmutableDict above for a discussion of why a special object is needed here.
"""
split_format_str = ensure_str('{%s}')
sep = ensure_str(':')
return ImmutableDict(frozenset((convert_mandatory(ct.subtypes[0], v[0]), convert(ct.subtypes[1], v[1]))
for v in [split(split_format_str % vv, sep=sep) for vv in split(val)]))
def convert_user_type(val, ct=cql_type):
"""
A user type is a dictionary except that we must convert each key into
an attribute, so we are using named tuples. It must also be hashable,
so we cannot use dictionaries. Maybe there is a way to instantiate ct
directly but I could not work it out.
Also note that it is possible that the subfield names in the csv are in the
wrong order, so we must sort them according to ct.fieldnames, see CASSANDRA-12959.
"""
split_format_str = ensure_str('{%s}')
sep = ensure_str(':')
vals = [v for v in [split(split_format_str % vv, sep=sep) for vv in split(val)]]
dict_vals = dict((unprotect(v[0]), v[1]) for v in vals)
sorted_converted_vals = [(n, convert(t, dict_vals[n]) if n in dict_vals else self.get_null_val())
for n, t in zip(ct.fieldnames, ct.subtypes)]
ret_type = namedtuple(ct.typename, [v[0] for v in sorted_converted_vals])
return ret_type(*tuple(v[1] for v in sorted_converted_vals))
def convert_single_subtype(val, ct=cql_type):
return converters.get(ct.subtypes[0].typename, convert_unknown)(val, ct=ct.subtypes[0])
def convert_unknown(val, ct=cql_type):
if issubclass(ct, UserType):
return convert_user_type(val, ct=ct)
elif issubclass(ct, ReversedType):
return convert_single_subtype(val, ct=ct)
printdebugmsg("Unknown type %s (%s) for val %s" % (ct, ct.typename, val))
return val
converters = {
'blob': convert_blob,
'decimal': get_convert_decimal_fcn(adapter=Decimal),
'uuid': convert_uuid,
'boolean': convert_bool,
'tinyint': get_convert_integer_fcn(),
'ascii': convert_text,
'float': get_convert_decimal_fcn(),
'double': get_convert_decimal_fcn(),
'bigint': get_convert_integer_fcn(adapter=int),
'int': get_convert_integer_fcn(),
'varint': get_convert_integer_fcn(),
'inet': convert_text,
'counter': get_convert_integer_fcn(adapter=int),
'timestamp': convert_datetime,
'timeuuid': convert_uuid,
'date': convert_date,
'smallint': get_convert_integer_fcn(),
'time': convert_time,
'text': convert_text,
'varchar': convert_text,
'list': convert_list,
'set': convert_set,
'map': convert_map,
'tuple': convert_tuple,
'frozen': convert_single_subtype,
}
return converters.get(cql_type.typename, convert_unknown)
def get_null_val(self):
"""
Return the null value that is inserted for fields that are missing from csv files.
For counters we should return zero so that the counter value won't be incremented.
For everything else we return nulls: None if we use prepared statements,
or "NULL" otherwise. Note that for counters we never use prepared statements, so we
only check is_counter when use_prepared_statements is false.
"""
return None if self.use_prepared_statements else (ensure_str("0") if self.is_counter else ensure_str("NULL"))
def convert_row(self, row):
"""
Convert the row into a list of parsed values if using prepared statements, else simply apply the
protection functions to escape values with quotes when required. Also check on the row length and
make sure primary partition key values aren't missing.
"""
converters = self.converters if self.use_prepared_statements else self.protectors
if len(row) != len(converters):
raise ParseError('Invalid row length %d should be %d' % (len(row), len(converters)))
for i in self.primary_key_indexes:
if row[i] == self.nullval:
raise ParseError(self.get_null_primary_key_message(i))
def convert(c, v):
try:
return c(v) if v != self.nullval else self.get_null_val()
except Exception as e:
# if we could not convert an empty string, then self.nullval has been set to a marker
# because the user needs to import empty strings, except that the converters for some types
# will fail to convert an empty string, in this case the null value should be inserted
# see CASSANDRA-12794
if v == '':
return self.get_null_val()
if self.debug:
traceback.print_exc()
raise ParseError("Failed to parse %s : %s" % (v, e.message if hasattr(e, 'message') else str(e)))
return [convert(conv, val) for conv, val in zip(converters, row)]
def get_null_primary_key_message(self, idx):
message = "Cannot insert null value for primary key column '%s'." % (self.columns[idx],)
if self.nullval == '':
message += " If you want to insert empty strings, consider using" \
" the WITH NULL=<marker> option for COPY."
return message
def get_row_partition_key_values_fcn(self):
"""
Return a function to convert a row into a string composed of the partition key values serialized
and binary packed (the tokens on the ring). Depending on whether we are using prepared statements, we
may have to convert the primary key values first, so we have two different serialize_value implementations.
We also return different functions depending on how many partition key indexes we have (single or multiple).
See also BoundStatement.routing_key.
"""
def serialize_value_prepared(n, v):
return self.cqltypes[n].serialize(v, self.proto_version)
def serialize_value_not_prepared(n, v):
return self.cqltypes[n].serialize(self.converters[n](self.unprotect(v)), self.proto_version)
partition_key_indexes = self.partition_key_indexes
serialize = serialize_value_prepared if self.use_prepared_statements else serialize_value_not_prepared
def serialize_row_single(row):
return serialize(partition_key_indexes[0], row[partition_key_indexes[0]])
def serialize_row_multiple(row):
pk_values = []
for i in partition_key_indexes:
val = serialize(i, row[i])
length = len(val)
pk_values.append(struct.pack(">H%dsB" % length, length, val, 0))
return b"".join(pk_values)
if len(partition_key_indexes) == 1:
return serialize_row_single
return serialize_row_multiple
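# Hedged illustration of the routing key format above (hypothetical values): for a composite
# partition key, each serialized component is packed as
#   struct.pack(">H%dsB" % len(v), len(v), v, 0)
# i.e. a 2-byte big-endian length, the serialized bytes themselves and a trailing zero byte,
# and the packed components are concatenated; a single-column key is just its serialized bytes.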
class TokenMap(object):
"""
A wrapper around the metadata token map to speed things up by caching ring token *values* and
replicas. It is very important that we use the token values, which are primitive types, rather
than the tokens classes when calling bisect_right() in split_batches(). If we use primitive values,
the bisect is done in compiled code whilst with token classes each comparison requires a call
into the interpreter to perform the cmp operation defined in Python. A simple test with 1 million bisect
operations on an array of 2048 tokens was done in 0.37 seconds with primitives and 2.25 seconds with
token classes. This is significant for large datasets because we need to do a bisect for each single row,
and if VNODES are used, the size of the token map can get quite large too.
"""
def __init__(self, ks, hostname, local_dc, session):
self.ks = ks
self.hostname = hostname
self.local_dc = local_dc
self.metadata = session.cluster.metadata
self._initialize_ring()
# Note that refresh metadata is disabled by default and we currently do not intercept it
# If hosts are added, removed or moved during a COPY operation our token map is no longer optimal
# However we can cope with hosts going down and up since we filter for replicas that are up when
# making each batch
def _initialize_ring(self):
token_map = self.metadata.token_map
if token_map is None:
self.ring = [0]
self.replicas = [(self.metadata.get_host(self.hostname),)]
self.pk_to_token_value = lambda pk: 0
return
token_map.rebuild_keyspace(self.ks, build_if_absent=True)
tokens_to_hosts = token_map.tokens_to_hosts_by_ks.get(self.ks, None)
from_key = token_map.token_class.from_key
self.ring = [token.value for token in token_map.ring]
self.replicas = [tuple(tokens_to_hosts[token]) for token in token_map.ring]
self.pk_to_token_value = lambda pk: from_key(pk).value
@staticmethod
def get_ring_pos(ring, val):
idx = bisect_right(ring, val)
return idx if idx < len(ring) else 0
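# Hedged example of get_ring_pos() above (hypothetical ring of primitive token values):
#   ring = [-100, 0, 100]
#   get_ring_pos(ring, -50)  -> 1   (-50 falls between -100 and 0, so it belongs to position 1)
#   get_ring_pos(ring, 150)  -> 0   (past the last token we wrap around to position 0)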
def filter_replicas(self, hosts):
shuffled = tuple(sorted(hosts, key=lambda k: random.random()))
return [r for r in shuffled if r.is_up is not False and r.datacenter == self.local_dc] if hosts else ()
class FastTokenAwarePolicy(DCAwareRoundRobinPolicy):
"""
Send to any replicas attached to the query, or else fall back to DCAwareRoundRobinPolicy. Perform
exponential back-off if all eligible replicas already have too many in-flight requests.
"""
def __init__(self, parent):
DCAwareRoundRobinPolicy.__init__(self, parent.local_dc, 0)
self.max_backoff_attempts = parent.max_backoff_attempts
self.max_inflight_messages = parent.max_inflight_messages
def make_query_plan(self, working_keyspace=None, query=None):
"""
Extend TokenAwarePolicy.make_query_plan() so that we choose the same replicas in preference
and most importantly we avoid repeating the (slow) bisect. We also implement a backoff policy
by sleeping an exponentially larger delay in case all connections to eligible replicas have
too many in flight requests.
"""
connections = ConnectionWrapper.connections
replicas = list(query.replicas) if hasattr(query, 'replicas') else []
replicas.extend([r for r in DCAwareRoundRobinPolicy.make_query_plan(self, working_keyspace, query)
if r not in replicas])
if replicas:
def replica_is_not_overloaded(r):
if r.address in connections:
conn = connections[r.address]
return conn.in_flight < min(conn.max_request_id, self.max_inflight_messages)
return True
for i in range(self.max_backoff_attempts):
for r in filter(replica_is_not_overloaded, replicas):
yield r
# the back-off starts at 10 ms (0.01 s) and can go up to 2^max_backoff_attempts * 0.01 seconds;
# with the current default of 12 attempts that is 2^12 * 0.01 = ~40 seconds per sleep
delay = randint(1, pow(2, i + 1)) * 0.01
printdebugmsg("All replicas busy, sleeping for %d second(s)..." % (delay,))
time.sleep(delay)
printdebugmsg("Replicas too busy, given up")
class ConnectionWrapper(DefaultConnection):
"""
A wrapper to the driver default connection that helps in keeping track of messages in flight.
The newly created connection is registered into a global dictionary so that FastTokenAwarePolicy
is able to determine if a connection has too many in flight requests.
"""
connections = {}
def __init__(self, *args, **kwargs):
DefaultConnection.__init__(self, *args, **kwargs)
self.connections[self.host] = self
class ImportProcess(ChildProcess):
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
self.skip_columns = params['skip_columns']
self.valid_columns = [c for c in params['valid_columns']]
self.skip_column_indexes = [i for i, c in enumerate(self.columns) if c in self.skip_columns]
options = params['options']
self.nullval = options.copy['nullval']
self.max_attempts = options.copy['maxattempts']
self.min_batch_size = options.copy['minbatchsize']
self.max_batch_size = options.copy['maxbatchsize']
self.use_prepared_statements = options.copy['preparedstatements']
self.ttl = options.copy['ttl']
self.max_inflight_messages = options.copy['maxinflightmessages']
self.max_backoff_attempts = options.copy['maxbackoffattempts']
self.request_timeout = options.copy['requesttimeout']
self.dialect_options = options.dialect
self._session = None
self.query = None
self.conv = None
self.make_statement = None
@property
def session(self):
if not self._session:
cluster = Cluster(
contact_points=(self.hostname,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
load_balancing_policy=FastTokenAwarePolicy(self),
ssl_options=ssl_settings(self.hostname, self.config_file) if self.ssl else None,
default_retry_policy=FallthroughRetryPolicy(), # we throw on timeouts and retry in the error callback
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0,
connection_class=ConnectionWrapper)
self._session = cluster.connect(self.ks)
self._session.default_timeout = self.request_timeout
return self._session
def run(self):
if self.coverage:
self.start_coverage()
try:
pr = profile_on() if PROFILE_ON else None
self.on_fork()
self.inner_run(*self.make_params())
if pr:
profile_off(pr, file_name='worker_profile_%d.txt' % (os.getpid(),))
except Exception as exc:
self.report_error(exc)
finally:
if self.coverage:
self.stop_coverage()
self.close()
def close(self):
if self._session:
self._session.cluster.shutdown()
ChildProcess.close(self)
def is_counter(self, table_meta):
return "counter" in [table_meta.columns[name].cql_type for name in self.valid_columns]
def make_params(self):
metadata = self.session.cluster.metadata
table_meta = metadata.keyspaces[self.ks].tables[self.table]
prepared_statement = None
if self.is_counter(table_meta):
query = 'UPDATE %s.%s SET %%s WHERE %%s' % (protect_name(self.ks), protect_name(self.table))
make_statement = self.wrap_make_statement(self.make_counter_batch_statement)
elif self.use_prepared_statements:
query = 'INSERT INTO %s.%s (%s) VALUES (%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),),
', '.join(['?' for _ in self.valid_columns]))
if self.ttl >= 0:
query += 'USING TTL %s' % (self.ttl,)
query = self.session.prepare(query)
query.consistency_level = self.consistency_level
prepared_statement = query
make_statement = self.wrap_make_statement(self.make_prepared_batch_statement)
else:
query = 'INSERT INTO %s.%s (%s) VALUES (%%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),))
if self.ttl >= 0:
query += 'USING TTL %s' % (self.ttl,)
make_statement = self.wrap_make_statement(self.make_non_prepared_batch_statement)
query = ensure_str(query)
conv = ImportConversion(self, table_meta, prepared_statement)
tm = TokenMap(self.ks, self.hostname, self.local_dc, self.session)
return query, conv, tm, make_statement
def inner_run(self, query, conv, tm, make_statement):
"""
Main run method. Note that we bind self methods that are called inside loops
for performance reasons.
"""
self.query = query
self.conv = conv
self.make_statement = make_statement
convert_rows = self.convert_rows
split_into_batches = self.split_into_batches
result_callback = self.result_callback
err_callback = self.err_callback
session = self.session
while True:
chunk = self.inmsg.recv()
if chunk is None:
break
try:
chunk['rows'] = convert_rows(conv, chunk)
for replicas, batch in split_into_batches(chunk, conv, tm):
statement = make_statement(query, conv, chunk, batch, replicas)
if statement:
future = session.execute_async(statement)
future.add_callbacks(callback=result_callback, callback_args=(batch, chunk),
errback=err_callback, errback_args=(batch, chunk, replicas))
# do not handle the else case: if a statement could not be created, the exception is handled
# in self.wrap_make_statement and the error is reported there. If a failure is injected that
# causes the statement to be None, then we should not report the error, so that we can test
# how the parent process handles missing batches from child processes
except Exception as exc:
self.report_error(exc, chunk, chunk['rows'])
def wrap_make_statement(self, inner_make_statement):
def make_statement(query, conv, chunk, batch, replicas):
try:
return inner_make_statement(query, conv, batch, replicas)
except Exception as exc:
print("Failed to make batch statement: {}".format(exc))
self.report_error(exc, chunk, batch['rows'])
return None
def make_statement_with_failures(query, conv, chunk, batch, replicas):
failed_batch, apply_failure = self.maybe_inject_failures(batch)
if apply_failure:
return failed_batch
return make_statement(query, conv, chunk, batch, replicas)
return make_statement_with_failures if self.test_failures else make_statement
def make_counter_batch_statement(self, query, conv, batch, replicas):
statement = BatchStatement(batch_type=BatchType.COUNTER, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
for row in batch['rows']:
where_clause = []
set_clause = []
for i, value in enumerate(row):
if i in conv.primary_key_indexes:
where_clause.append(ensure_text("{}={}").format(self.valid_columns[i], ensure_text(value)))
else:
set_clause.append(ensure_text("{}={}+{}").format(self.valid_columns[i], self.valid_columns[i], ensure_text(value)))
full_query_text = query % (ensure_text(',').join(set_clause), ensure_text(' AND ').join(where_clause))
statement.add(ensure_str(full_query_text))
return statement
def make_prepared_batch_statement(self, query, _, batch, replicas):
"""
Return a batch statement. This is an optimized version of:
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
for row in batch['rows']:
statement.add(query, row)
We could optimize further by removing bound statements altogether, but we'd have to duplicate much
more of the driver's code (BoundStatement.bind()).
"""
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
statement._statements_and_parameters = [(True, query.query_id, query.bind(r).values) for r in batch['rows']]
return statement
def make_non_prepared_batch_statement(self, query, _, batch, replicas):
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
field_sep = b',' if six.PY2 else ','
statement._statements_and_parameters = [(False, query % (field_sep.join(r),), ()) for r in batch['rows']]
return statement
def convert_rows(self, conv, chunk):
"""
Return converted rows and report any errors during conversion.
"""
def filter_row_values(row):
return [v for i, v in enumerate(row) if i not in self.skip_column_indexes]
if self.skip_column_indexes:
rows = [filter_row_values(r) for r in list(csv.reader(chunk['rows'], **self.dialect_options))]
else:
rows = list(csv.reader(chunk['rows'], **self.dialect_options))
errors = defaultdict(list)
def convert_row(r):
try:
return conv.convert_row(r)
except Exception as err:
errors[err.message if hasattr(err, 'message') else str(err)].append(r)
return None
converted_rows = [_f for _f in [convert_row(r) for r in rows] if _f]
if errors:
for msg, rows in errors.items():
self.report_error(ParseError(msg), chunk, rows)
return converted_rows
def maybe_inject_failures(self, batch):
"""
Examine self.test_failures and see if the batch is a batch
supposed to cause a failure (failing_batch), or to terminate the worker process
(exit_batch), or not to be sent (unsent_batch).
@return any statement that will cause a failure or None if the statement should not be sent
plus a boolean indicating if a failure should be applied at all
"""
if 'failing_batch' in self.test_failures:
failing_batch = self.test_failures['failing_batch']
if failing_batch['id'] == batch['id']:
if batch['attempts'] < failing_batch['failures']:
statement = SimpleStatement("INSERT INTO badtable (a, b) VALUES (1, 2)",
consistency_level=self.consistency_level)
return statement, True # use this statement, which will cause an error
if 'exit_batch' in self.test_failures:
exit_batch = self.test_failures['exit_batch']
if exit_batch['id'] == batch['id']:
sys.exit(1)
if 'unsent_batch' in self.test_failures:
unsent_batch = self.test_failures['unsent_batch']
if unsent_batch['id'] == batch['id']:
return None, True # do not send this batch, which will cause missing acks in the parent process
return None, False # carry on as normal, do not apply any failures
@staticmethod
def make_batch(batch_id, rows, attempts=1):
return {'id': batch_id, 'rows': rows, 'attempts': attempts}
def split_into_batches(self, chunk, conv, tm):
"""
Batch rows by ring position or replica.
If there are at least min_batch_size rows for a ring position then split these rows into
groups of max_batch_size and send a batch for each group, using all replicas for this ring position.
Otherwise, we are forced to batch by replica, and here unfortunately we can only choose one replica
per batch, so that all partition keys in the batch share a common replica. We are typically able
to batch by ring position for small clusters or when VNODES are not used. For large clusters with VNODES
it may not be possible, in this case it helps to increase the CHUNK SIZE but up to a limit, otherwise
we may choke the cluster.
"""
rows_by_ring_pos = defaultdict(list)
errors = defaultdict(list)
min_batch_size = self.min_batch_size
max_batch_size = self.max_batch_size
ring = tm.ring
get_row_partition_key_values = conv.get_row_partition_key_values_fcn()
pk_to_token_value = tm.pk_to_token_value
get_ring_pos = tm.get_ring_pos
make_batch = self.make_batch
for row in chunk['rows']:
try:
pk = get_row_partition_key_values(row)
rows_by_ring_pos[get_ring_pos(ring, pk_to_token_value(pk))].append(row)
except Exception as e:
errors[e.message if hasattr(e, 'message') else str(e)].append(row)
if errors:
for msg, rows in errors.items():
self.report_error(ParseError(msg), chunk, rows)
replicas = tm.replicas
filter_replicas = tm.filter_replicas
rows_by_replica = defaultdict(list)
for ring_pos, rows in rows_by_ring_pos.items():
if len(rows) > min_batch_size:
for i in range(0, len(rows), max_batch_size):
yield filter_replicas(replicas[ring_pos]), make_batch(chunk['id'], rows[i:i + max_batch_size])
else:
# select only the first valid replica to guarantee more overlap or none at all
rows_by_replica[tuple(filter_replicas(replicas[ring_pos])[:1])].extend(rows) # TODO: revisit tuple wrapper
# Now send the batches by replica
for replicas, rows in rows_by_replica.items():
for i in range(0, len(rows), max_batch_size):
yield replicas, make_batch(chunk['id'], rows[i:i + max_batch_size])
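# Hedged example of the batching strategy above (hypothetical sizes): with min_batch_size=10 and
# max_batch_size=20, a ring position that received 45 rows is split into batches of 20, 20 and 5
# addressed to all of its live replicas, whereas ring positions with at most 10 rows are merged by
# their first live replica and the merged rows are then split into batches of at most 20.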
def result_callback(self, _, batch, chunk):
self.update_chunk(batch['rows'], chunk)
def err_callback(self, response, batch, chunk, replicas):
if isinstance(response, OperationTimedOut) and chunk['imported'] == chunk['num_rows_sent']:
return # occasionally the driver sends false timeouts for rows already processed (PYTHON-652)
err_is_final = batch['attempts'] >= self.max_attempts
self.report_error(response, chunk, batch['rows'], batch['attempts'], err_is_final)
if not err_is_final:
batch['attempts'] += 1
statement = self.make_statement(self.query, self.conv, chunk, batch, replicas)
future = self.session.execute_async(statement)
future.add_callbacks(callback=self.result_callback, callback_args=(batch, chunk),
errback=self.err_callback, errback_args=(batch, chunk, replicas))
def report_error(self, err, chunk=None, rows=None, attempts=1, final=True):
if self.debug and sys.exc_info()[1] == err:
traceback.print_exc()
err_msg = err.message if hasattr(err, 'message') else str(err)
self.outmsg.send(ImportTaskError(err.__class__.__name__, err_msg, rows, attempts, final))
if final and chunk is not None:
self.update_chunk(rows, chunk)
def update_chunk(self, rows, chunk):
chunk['imported'] += len(rows)
if chunk['imported'] == chunk['num_rows_sent']:
self.outmsg.send(ImportProcessResult(chunk['num_rows_sent']))
class RateMeter(object):
def __init__(self, log_fcn, update_interval=0.25, log_file=''):
self.log_fcn = log_fcn # the function for logging, may be None to disable logging
self.update_interval = update_interval # how often we update in seconds
self.log_file = log_file # an optional file where to log statistics in addition to stdout
self.start_time = time.time() # the start time
self.last_checkpoint_time = self.start_time # last time we logged
self.current_rate = 0.0 # rows per second
self.current_record = 0 # number of records since we last updated
self.total_records = 0 # total number of records
if os.path.isfile(self.log_file):
os.unlink(self.log_file)
def increment(self, n=1):
self.current_record += n
self.maybe_update()
def maybe_update(self, sleep=False):
if self.current_record == 0:
return
new_checkpoint_time = time.time()
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= self.update_interval:
self.update(new_checkpoint_time)
self.log_message()
elif sleep:
remaining_time = time_difference - self.update_interval
if remaining_time > 0.000001:
time.sleep(remaining_time)
def update(self, new_checkpoint_time):
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= 1e-09:
self.current_rate = self.get_new_rate(self.current_record / time_difference)
self.last_checkpoint_time = new_checkpoint_time
self.total_records += self.current_record
self.current_record = 0
def get_new_rate(self, new_rate):
"""
return the rate of the last period: this is the new rate but
averaged with the last rate to smooth a bit
"""
if self.current_rate == 0.0:
return new_rate
else:
return (self.current_rate + new_rate) / 2.0
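# Hedged example of the smoothing above (hypothetical rates): if the previous rate was
# 1000 rows/s and the latest period measured 3000 rows/s, get_new_rate() reports
# (1000 + 3000) / 2.0 = 2000 rows/s, damping sudden spikes between update intervals.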
def get_avg_rate(self):
"""
return the average rate since we started measuring
"""
time_difference = time.time() - self.start_time
return self.total_records / time_difference if time_difference >= 1e-09 else 0
def log_message(self):
if not self.log_fcn:
return
output = 'Processed: %d rows; Rate: %7.0f rows/s; Avg. rate: %7.0f rows/s\r' % \
(self.total_records, self.current_rate, self.get_avg_rate())
self.log_fcn(output, eol='\r')
if self.log_file:
with open(self.log_file, "a") as f:
f.write(output + '\n')
def get_total_records(self):
self.update(time.time())
self.log_message()
return self.total_records
|
aavanian/bokeh
|
refs/heads/master
|
sphinx/source/docs/user_guide/examples/styling_func_tick_formatter.py
|
16
|
from bokeh.models import FuncTickFormatter
from bokeh.plotting import figure, show, output_file
output_file("formatter.html")
p = figure(plot_width=500, plot_height=500)
p.circle([0, 2, 4, 6, 8, 10], [6, 2, 4, 10, 8, 0], size=30)
p.yaxis.formatter = FuncTickFormatter(code="""
return Math.floor(tick) + " + " + (tick % 1).toFixed(2)
""")
show(p)
|
Goldmund-Wyldebeast-Wunderliebe/django-diazo
|
refs/heads/master
|
django_diazo/apps.py
|
1
|
import imp
from django.apps import AppConfig
from django.conf import settings
from django_diazo.settings import MODULE_NAME
class DjangoDiazoConfig(AppConfig):
name = 'django_diazo'
label = 'django_diazo'
verbose_name = "Django Diazo"
def ready(self):
"""
Implement this method to run code when Django starts.
Autodiscovers the 'diazo' module in project apps.
"""
for app in settings.INSTALLED_APPS:
try:
app_path = __import__(app, {}, {}, [app.split('.')[-1]]).__path__
except AttributeError:
continue
try:
imp.find_module(MODULE_NAME, app_path)
except ImportError:
continue
__import__('%s.%s' % (app, MODULE_NAME))
|
Vixionar/django
|
refs/heads/master
|
tests/template_tests/syntax_tests/test_cycle.py
|
199
|
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango110Warning
from ..utils import setup
class CycleTagTests(SimpleTestCase):
libraries = {'future': 'django.templatetags.future'}
@setup({'cycle01': '{% cycle a %}'})
def test_cycle01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cycle01')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'cycle02': '{% cycle a,b,c as abc %}{% cycle abc %}'})
def test_cycle02(self):
output = self.engine.render_to_string('cycle02')
self.assertEqual(output, 'ab')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'cycle03': '{% cycle a,b,c as abc %}{% cycle abc %}{% cycle abc %}'})
def test_cycle03(self):
output = self.engine.render_to_string('cycle03')
self.assertEqual(output, 'abc')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'cycle04': '{% cycle a,b,c as abc %}{% cycle abc %}{% cycle abc %}{% cycle abc %}'})
def test_cycle04(self):
output = self.engine.render_to_string('cycle04')
self.assertEqual(output, 'abca')
@setup({'cycle05': '{% cycle %}'})
def test_cycle05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cycle05')
@setup({'cycle06': '{% cycle a %}'})
def test_cycle06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cycle06')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'cycle07': '{% cycle a,b,c as foo %}{% cycle bar %}'})
def test_cycle07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cycle07')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'cycle08': '{% cycle a,b,c as foo %}{% cycle foo %}{{ foo }}{{ foo }}{% cycle foo %}{{ foo }}'})
def test_cycle08(self):
output = self.engine.render_to_string('cycle08')
self.assertEqual(output, 'abbbcc')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'cycle09': '{% for i in test %}{% cycle a,b %}{{ i }},{% endfor %}'})
def test_cycle09(self):
output = self.engine.render_to_string('cycle09', {'test': list(range(5))})
self.assertEqual(output, 'a0,b1,a2,b3,a4,')
@setup({'cycle10': "{% cycle 'a' 'b' 'c' as abc %}{% cycle abc %}"})
def test_cycle10(self):
output = self.engine.render_to_string('cycle10')
self.assertEqual(output, 'ab')
@setup({'cycle11': "{% cycle 'a' 'b' 'c' as abc %}{% cycle abc %}{% cycle abc %}"})
def test_cycle11(self):
output = self.engine.render_to_string('cycle11')
self.assertEqual(output, 'abc')
@setup({'cycle12': "{% cycle 'a' 'b' 'c' as abc %}{% cycle abc %}{% cycle abc %}{% cycle abc %}"})
def test_cycle12(self):
output = self.engine.render_to_string('cycle12')
self.assertEqual(output, 'abca')
@setup({'cycle13': "{% for i in test %}{% cycle 'a' 'b' %}{{ i }},{% endfor %}"})
def test_cycle13(self):
output = self.engine.render_to_string('cycle13', {'test': list(range(5))})
self.assertEqual(output, 'a0,b1,a2,b3,a4,')
@setup({'cycle14': '{% cycle one two as foo %}{% cycle foo %}'})
def test_cycle14(self):
output = self.engine.render_to_string('cycle14', {'one': '1', 'two': '2'})
self.assertEqual(output, '12')
@setup({'cycle15': '{% for i in test %}{% cycle aye bee %}{{ i }},{% endfor %}'})
def test_cycle15(self):
output = self.engine.render_to_string('cycle15', {'test': list(range(5)), 'aye': 'a', 'bee': 'b'})
self.assertEqual(output, 'a0,b1,a2,b3,a4,')
@setup({'cycle16': '{% cycle one|lower two as foo %}{% cycle foo %}'})
def test_cycle16(self):
output = self.engine.render_to_string('cycle16', {'one': 'A', 'two': '2'})
self.assertEqual(output, 'a2')
@setup({'cycle17': "{% cycle 'a' 'b' 'c' as abc silent %}"
"{% cycle abc %}{% cycle abc %}{% cycle abc %}{% cycle abc %}"})
def test_cycle17(self):
output = self.engine.render_to_string('cycle17')
self.assertEqual(output, '')
@setup({'cycle18': "{% cycle 'a' 'b' 'c' as foo invalid_flag %}"})
def test_cycle18(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cycle18')
@setup({'cycle19': "{% cycle 'a' 'b' as silent %}{% cycle silent %}"})
def test_cycle19(self):
output = self.engine.render_to_string('cycle19')
self.assertEqual(output, 'ab')
@setup({'cycle20': '{% cycle one two as foo %} & {% cycle foo %}'})
def test_cycle20(self):
output = self.engine.render_to_string('cycle20', {'two': 'C & D', 'one': 'A & B'})
self.assertEqual(output, 'A & B & C & D')
@setup({'cycle21': '{% filter force_escape %}'
'{% cycle one two as foo %} & {% cycle foo %}{% endfilter %}'})
def test_cycle21(self):
output = self.engine.render_to_string('cycle21', {'two': 'C & D', 'one': 'A & B'})
self.assertEqual(output, 'A &amp; B & C &amp; D')
@setup({'cycle22': "{% for x in values %}{% cycle 'a' 'b' 'c' as abc silent %}{{ x }}{% endfor %}"})
def test_cycle22(self):
output = self.engine.render_to_string('cycle22', {'values': [1, 2, 3, 4]})
self.assertEqual(output, '1234')
@setup({'cycle23': "{% for x in values %}"
"{% cycle 'a' 'b' 'c' as abc silent %}{{ abc }}{{ x }}{% endfor %}"})
def test_cycle23(self):
output = self.engine.render_to_string('cycle23', {'values': [1, 2, 3, 4]})
self.assertEqual(output, 'a1b2c3a4')
@setup({
'cycle24': "{% for x in values %}"
"{% cycle 'a' 'b' 'c' as abc silent %}{% include 'included-cycle' %}{% endfor %}",
'included-cycle': '{{ abc }}',
})
def test_cycle24(self):
output = self.engine.render_to_string('cycle24', {'values': [1, 2, 3, 4]})
self.assertEqual(output, 'abca')
@setup({'cycle25': '{% cycle a as abc %}'})
def test_cycle25(self):
output = self.engine.render_to_string('cycle25', {'a': '<'})
self.assertEqual(output, '<')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'cycle26': '{% load cycle from future %}{% cycle a b as ab %}{% cycle ab %}'})
def test_cycle26(self):
output = self.engine.render_to_string('cycle26', {'a': '<', 'b': '>'})
self.assertEqual(output, '<>')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'cycle27': '{% load cycle from future %}'
'{% autoescape off %}{% cycle a b as ab %}{% cycle ab %}{% endautoescape %}'})
def test_cycle27(self):
output = self.engine.render_to_string('cycle27', {'a': '<', 'b': '>'})
self.assertEqual(output, '<>')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'cycle28': '{% load cycle from future %}{% cycle a|safe b as ab %}{% cycle ab %}'})
def test_cycle28(self):
output = self.engine.render_to_string('cycle28', {'a': '<', 'b': '>'})
self.assertEqual(output, '<>')
|
duhzecca/cinder
|
refs/heads/master
|
cinder/tests/unit/test_backup.py
|
3
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Backup code."""
import ddt
import tempfile
import uuid
import mock
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import timeutils
from cinder.backup import api
from cinder.backup import manager
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_service_with_verify as fake_service
from cinder.volume.drivers import lvm
CONF = cfg.CONF
class FakeBackupException(Exception):
pass
class BaseBackupTest(test.TestCase):
def setUp(self):
super(BaseBackupTest, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(volumes_dir=vol_tmpdir)
self.backup_mgr = importutils.import_object(CONF.backup_manager)
self.backup_mgr.host = 'testhost'
self.ctxt = context.get_admin_context()
self.backup_mgr.driver.set_initialized()
def _create_backup_db_entry(self, volume_id=1, display_name='test_backup',
display_description='this is a test backup',
container='volumebackups',
status='creating',
size=1,
object_count=0,
project_id='fake',
service=None,
temp_volume_id=None,
temp_snapshot_id=None):
"""Create a backup entry in the DB.
Return the entry ID
"""
kwargs = {}
kwargs['volume_id'] = volume_id
kwargs['user_id'] = 'fake'
kwargs['project_id'] = project_id
kwargs['host'] = 'testhost'
kwargs['availability_zone'] = '1'
kwargs['display_name'] = display_name
kwargs['display_description'] = display_description
kwargs['container'] = container
kwargs['status'] = status
kwargs['fail_reason'] = ''
kwargs['service'] = service or CONF.backup_driver
kwargs['snapshot'] = False
kwargs['parent_id'] = None
kwargs['size'] = size
kwargs['object_count'] = object_count
kwargs['temp_volume_id'] = temp_volume_id
kwargs['temp_snapshot_id'] = temp_snapshot_id
backup = objects.Backup(context=self.ctxt, **kwargs)
backup.create()
return backup
def _create_volume_db_entry(self, display_name='test_volume',
display_description='this is a test volume',
status='backing-up',
previous_status='available',
size=1):
"""Create a volume entry in the DB.
Return the entry ID
"""
vol = {}
vol['size'] = size
vol['host'] = 'testhost'
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['status'] = status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
vol['availability_zone'] = '1'
vol['previous_status'] = previous_status
return db.volume_create(self.ctxt, vol)['id']
def _create_snapshot_db_entry(self, display_name='test_snapshot',
display_description='test snapshot',
status='available',
size=1,
volume_id='1',
provider_location=None):
"""Create a snapshot entry in the DB.
Return the entry ID.
"""
kwargs = {}
kwargs['size'] = size
kwargs['host'] = 'testhost'
kwargs['user_id'] = 'fake'
kwargs['project_id'] = 'fake'
kwargs['status'] = status
kwargs['display_name'] = display_name
kwargs['display_description'] = display_description
kwargs['volume_id'] = volume_id
kwargs['cgsnapshot_id'] = None
kwargs['volume_size'] = size
kwargs['provider_location'] = provider_location
snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs)
snapshot_obj.create()
return snapshot_obj
def _create_volume_attach(self, volume_id):
values = {'volume_id': volume_id,
'attach_status': 'attached', }
attachment = db.volume_attach(self.ctxt, values)
db.volume_attached(self.ctxt, attachment['id'], None, 'testhost',
'/dev/vd0')
def _create_exported_record_entry(self, vol_size=1, exported_id=None):
"""Create backup metadata export entry."""
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id)
if exported_id is not None:
backup.id = exported_id
export = self.backup_mgr.export_record(self.ctxt, backup)
return export
def _create_export_record_db_entry(self,
volume_id='0000',
status='creating',
project_id='fake',
backup_id=None):
"""Create a backup entry in the DB.
Return the entry ID
"""
kwargs = {}
kwargs['volume_id'] = volume_id
kwargs['user_id'] = 'fake'
kwargs['project_id'] = project_id
kwargs['status'] = status
if backup_id:
kwargs['id'] = backup_id
backup = objects.BackupImport(context=self.ctxt, **kwargs)
backup.create()
return backup
@ddt.ddt
class BackupTestCase(BaseBackupTest):
"""Test Case for backups."""
@mock.patch.object(lvm.LVMVolumeDriver, 'delete_snapshot')
@mock.patch.object(lvm.LVMVolumeDriver, 'delete_volume')
def test_init_host(self, mock_delete_volume, mock_delete_snapshot):
"""Test stuck volumes and backups.
Make sure stuck volumes and backups are reset to correct
states when backup_manager.init_host() is called
"""
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
vol2_id = self._create_volume_db_entry()
self._create_volume_attach(vol2_id)
db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
vol3_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol3_id, {'status': 'available'})
vol4_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'})
temp_vol_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'})
vol5_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'})
temp_snap = self._create_snapshot_db_entry()
temp_snap.status = 'available'
temp_snap.save()
vol6_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol6_id, {'status': 'restoring-backup'})
backup1 = self._create_backup_db_entry(status='creating',
volume_id=vol1_id)
backup2 = self._create_backup_db_entry(status='restoring',
volume_id=vol2_id)
backup3 = self._create_backup_db_entry(status='deleting',
volume_id=vol3_id)
self._create_backup_db_entry(status='creating',
volume_id=vol4_id,
temp_volume_id=temp_vol_id)
self._create_backup_db_entry(status='creating',
volume_id=vol5_id,
temp_snapshot_id=temp_snap.id)
self.backup_mgr.init_host()
vol1 = db.volume_get(self.ctxt, vol1_id)
self.assertEqual('available', vol1['status'])
vol2 = db.volume_get(self.ctxt, vol2_id)
self.assertEqual('error_restoring', vol2['status'])
vol3 = db.volume_get(self.ctxt, vol3_id)
self.assertEqual('available', vol3['status'])
vol4 = db.volume_get(self.ctxt, vol4_id)
self.assertEqual('available', vol4['status'])
vol5 = db.volume_get(self.ctxt, vol5_id)
self.assertEqual('available', vol5['status'])
vol6 = db.volume_get(self.ctxt, vol6_id)
self.assertEqual('error_restoring', vol6['status'])
backup1 = db.backup_get(self.ctxt, backup1.id)
self.assertEqual('error', backup1['status'])
backup2 = db.backup_get(self.ctxt, backup2.id)
self.assertEqual('available', backup2['status'])
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup3.id)
self.assertTrue(mock_delete_volume.called)
self.assertTrue(mock_delete_snapshot.called)
@mock.patch('cinder.objects.backup.BackupList.get_all_by_host')
@mock.patch('cinder.manager.SchedulerDependentManager._add_to_threadpool')
def test_init_host_with_service_inithost_offload(self,
mock_add_threadpool,
mock_get_all_by_host):
self.override_config('backup_service_inithost_offload', True)
vol1_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol1_id, {'status': 'available'})
backup1 = self._create_backup_db_entry(status='deleting',
volume_id=vol1_id)
vol2_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol2_id, {'status': 'available'})
backup2 = self._create_backup_db_entry(status='deleting',
volume_id=vol2_id)
mock_get_all_by_host.return_value = [backup1, backup2]
self.backup_mgr.init_host()
calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1),
mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)]
mock_add_threadpool.assert_has_calls(calls, any_order=True)
self.assertEqual(2, mock_add_threadpool.call_count)
@mock.patch.object(db, 'volume_get')
@ddt.data(KeyError, exception.VolumeNotFound)
def test_cleanup_temp_volumes_snapshots_volume_not_found(
self, err, mock_volume_get):
"""Ensure we handle missing volume for a backup."""
mock_volume_get.side_effect = [err]
backup1 = self._create_backup_db_entry(status='creating')
backups = [backup1]
self.assertIsNone(self.backup_mgr._cleanup_temp_volumes_snapshots(
backups))
@mock.patch.object(lvm.LVMVolumeDriver, 'delete_snapshot')
def test_cleanup_temp_snapshot_not_found(self,
mock_delete_snapshot):
"""Ensure we handle missing temp snapshot for a backup."""
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
backup1 = self._create_backup_db_entry(status='error',
volume_id=vol1_id,
temp_snapshot_id='fake')
backups = [backup1]
self.assertEqual('fake', backups[0].temp_snapshot_id)
self.assertIsNone(self.backup_mgr._cleanup_temp_volumes_snapshots(
backups))
self.assertFalse(mock_delete_snapshot.called)
self.assertIsNone(backups[0].temp_snapshot_id)
backup1.destroy()
db.volume_destroy(self.ctxt, vol1_id)
@mock.patch.object(lvm.LVMVolumeDriver, 'delete_volume')
def test_cleanup_temp_volume_not_found(self,
mock_delete_volume):
"""Ensure we handle missing temp volume for a backup."""
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
backup1 = self._create_backup_db_entry(status='error',
volume_id=vol1_id,
temp_volume_id='fake')
backups = [backup1]
self.assertEqual('fake', backups[0].temp_volume_id)
self.assertIsNone(self.backup_mgr._cleanup_temp_volumes_snapshots(
backups))
self.assertFalse(mock_delete_volume.called)
self.assertIsNone(backups[0].temp_volume_id)
backup1.destroy()
db.volume_destroy(self.ctxt, vol1_id)
def test_create_backup_with_bad_volume_status(self):
"""Test creating a backup from a volume with a bad status."""
vol_id = self._create_volume_db_entry(status='restoring', size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.create_backup,
self.ctxt,
backup)
def test_create_backup_with_bad_backup_status(self):
"""Test creating a backup with a backup with a bad status."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.create_backup,
self.ctxt,
backup)
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
def test_create_backup_with_error(self, _mock_volume_backup):
"""Test error handling when error occurs during backup creation."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
_mock_volume_backup.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.create_backup,
self.ctxt,
backup)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
self.assertEqual('error_backing-up', vol['previous_status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
self.assertTrue(_mock_volume_backup.called)
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
def test_create_backup(self, _mock_volume_backup):
"""Test normal backup creation."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.backup_mgr.create_backup(self.ctxt, backup)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
self.assertEqual('backing-up', vol['previous_status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
self.assertEqual(vol_size, backup['size'])
self.assertTrue(_mock_volume_backup.called)
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
def test_create_backup_with_notify(self, _mock_volume_backup, notify):
"""Test normal backup creation with notifications."""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.backup_mgr.create_backup(self.ctxt, backup)
self.assertEqual(2, notify.call_count)
def test_restore_backup_with_bad_volume_status(self):
"""Test error handling.
Test error handling when restoring a backup to a volume
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='available', size=1)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
def test_restore_backup_with_bad_backup_status(self):
"""Test error handling.
Test error handling when restoring a backup with a backup
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
def test_restore_backup_with_driver_error(self, _mock_volume_restore):
"""Test error handling when an error occurs during backup restore."""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
_mock_volume_restore.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error_restoring', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
self.assertTrue(_mock_volume_restore.called)
def test_restore_backup_with_bad_service(self):
"""Test error handling.
Test error handling when attempting a restore of a backup
with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup,
self.ctxt,
backup,
vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
def test_restore_backup(self, _mock_volume_restore):
"""Test normal backup restoration."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('available', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
self.assertTrue(_mock_volume_restore.called)
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
def test_restore_backup_with_notify(self, _mock_volume_restore, notify):
"""Test normal backup restoration with notifications."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size)
backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id)
self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
self.assertEqual(2, notify.call_count)
def test_delete_backup_with_bad_backup_status(self):
"""Test error handling.
Test error handling when deleting a backup with a backup
with a bad status.
"""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
def test_delete_backup_with_error(self):
"""Test error handling when an error occurs during backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='deleting',
display_name='fail_on_delete',
volume_id=vol_id)
self.assertRaises(IOError,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
def test_delete_backup_with_bad_service(self):
"""Test error handling.
Test error handling when attempting a delete of a backup
with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(status='deleting',
volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup,
self.ctxt,
backup)
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
def test_delete_backup_with_no_service(self):
"""Test error handling.
Test error handling when attempting a delete of a backup
with no service defined for that backup, relates to bug #1162908
"""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='deleting',
volume_id=vol_id)
backup.service = None
backup.save()
self.backup_mgr.delete_backup(self.ctxt, backup)
def test_delete_backup(self):
"""Test normal backup deletion."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='deleting',
volume_id=vol_id)
self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertRaises(exception.BackupNotFound,
db.backup_get,
self.ctxt,
backup.id)
ctxt_read_deleted = context.get_admin_context('yes')
backup = db.backup_get(ctxt_read_deleted, backup.id)
self.assertEqual(True, backup.deleted)
self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
self.assertEqual('deleted', backup.status)
@mock.patch('cinder.volume.utils.notify_about_backup_usage')
def test_delete_backup_with_notify(self, notify):
"""Test normal backup deletion with notifications."""
vol_id = self._create_volume_db_entry(size=1)
backup = self._create_backup_db_entry(status='deleting',
volume_id=vol_id)
self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertEqual(2, notify.call_count)
def test_list_backup(self):
backups = db.backup_get_all_by_project(self.ctxt, 'project1')
self.assertEqual(0, len(backups))
self._create_backup_db_entry()
b2 = self._create_backup_db_entry(project_id='project1')
backups = db.backup_get_all_by_project(self.ctxt, 'project1')
self.assertEqual(1, len(backups))
self.assertEqual(backups[0].id, b2.id)
def test_backup_get_all_by_project_with_deleted(self):
"""Test deleted backups.
        Test that deleted backups don't show up in backup_get_all_by_project
        unless context.read_deleted is 'yes'.
"""
backups = db.backup_get_all_by_project(self.ctxt, 'fake')
self.assertEqual(0, len(backups))
backup_keep = self._create_backup_db_entry()
backup = self._create_backup_db_entry()
db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_project(self.ctxt, 'fake')
self.assertEqual(1, len(backups))
self.assertEqual(backups[0].id, backup_keep.id)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_project(ctxt_read_deleted, 'fake')
self.assertEqual(2, len(backups))
def test_backup_get_all_by_host_with_deleted(self):
"""Test deleted backups.
        Test that deleted backups don't show up in backup_get_all_by_host
        unless context.read_deleted is 'yes'.
"""
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(0, len(backups))
backup_keep = self._create_backup_db_entry()
backup = self._create_backup_db_entry()
db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(1, len(backups))
self.assertEqual(backups[0].id, backup_keep.id)
ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')
self.assertEqual(2, len(backups))
def test_backup_manager_driver_name(self):
"""Test mapping between backup services and backup drivers."""
self.override_config('backup_driver', "cinder.backup.services.swift")
backup_mgr = \
importutils.import_object(CONF.backup_manager)
self.assertEqual('cinder.backup.drivers.swift',
backup_mgr.driver_name)
def test_export_record_with_bad_service(self):
"""Test error handling.
Test error handling when attempting an export of a backup
record with a different service to that used to create the backup.
"""
vol_id = self._create_volume_db_entry(size=1)
service = 'cinder.tests.backup.bad_service'
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id,
service=service)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
backup)
def test_export_record_with_bad_backup_status(self):
"""Test error handling.
Test error handling when exporting a backup record with a backup
with a bad status.
"""
vol_id = self._create_volume_db_entry(status='available',
size=1)
backup = self._create_backup_db_entry(status='error',
volume_id=vol_id)
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record,
self.ctxt,
backup)
def test_export_record(self):
"""Test normal backup record export."""
vol_size = 1
vol_id = self._create_volume_db_entry(status='available',
size=vol_size)
backup = self._create_backup_db_entry(status='available',
volume_id=vol_id)
export = self.backup_mgr.export_record(self.ctxt, backup)
self.assertEqual(CONF.backup_driver, export['backup_service'])
        self.assertIn('backup_url', export)
def test_import_record_with_verify_not_implemented(self):
"""Test normal backup record import.
        Test that import succeeds when the driver does not support verify.
"""
vol_size = 1
backup_id = uuid.uuid4()
export = self._create_exported_record_entry(vol_size=vol_size,
exported_id=backup_id)
imported_record = self._create_export_record_db_entry(
backup_id=backup_id)
backup_hosts = []
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual('available', backup['status'])
self.assertEqual(vol_size, backup['size'])
    def test_import_record_with_wrong_id(self):
        """Test error handling.
        Test that import fails when the id in the imported backup record does
        not match the id of the backup object.
        """
vol_size = 1
export = self._create_exported_record_entry(vol_size=vol_size)
imported_record = self._create_export_record_db_entry()
backup_hosts = []
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
def test_import_record_with_bad_service(self):
"""Test error handling.
Test error handling when attempting an import of a backup
record with a different service to that used to create the backup.
"""
export = self._create_exported_record_entry()
export['backup_service'] = 'cinder.tests.unit.backup.bad_service'
imported_record = self._create_export_record_db_entry()
# Test the case where the additional hosts list is empty
backup_hosts = []
self.assertRaises(exception.ServiceNotFound,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
# Test that the import backup keeps calling other hosts to find a
# suitable host for the backup service
backup_hosts = ['fake1', 'fake2']
backup_hosts_expect = list(backup_hosts)
BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record'
with mock.patch(BackupAPI_import) as _mock_backup_import:
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
next_host = backup_hosts_expect.pop()
_mock_backup_import.assert_called_once_with(
self.ctxt,
next_host,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts_expect)
def test_import_record_with_invalid_backup(self):
"""Test error handling.
Test error handling when attempting an import of a backup
record where the backup driver returns an exception.
"""
export = self._create_exported_record_entry()
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_record_import_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'import_record'))
imported_record = self._create_export_record_db_entry()
backup_hosts = []
with mock.patch(_mock_record_import_class) as _mock_record_import:
_mock_record_import.side_effect = FakeBackupException('fake')
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_import.called)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual('error', backup['status'])
def test_not_supported_driver_to_force_delete(self):
"""Test force delete check method for not supported drivers."""
self.override_config('backup_driver', 'cinder.backup.drivers.ceph')
self.backup_mgr = importutils.import_object(CONF.backup_manager)
result = self.backup_mgr.check_support_to_force_delete(self.ctxt)
self.assertFalse(result)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_init_backup_repo_path', return_value=None)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_check_configuration', return_value=None)
def test_check_support_to_force_delete(self, mock_check_configuration,
mock_init_backup_repo_path):
"""Test force delete check method for supported drivers."""
self.override_config('backup_driver', 'cinder.backup.drivers.nfs')
self.backup_mgr = importutils.import_object(CONF.backup_manager)
result = self.backup_mgr.check_support_to_force_delete(self.ctxt)
self.assertTrue(result)
def test_backup_has_dependent_backups(self):
"""Test backup has dependent backups.
Test the query of has_dependent_backups in backup object is correct.
"""
vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size)
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertFalse(backup.has_dependent_backups)
class BackupTestCaseWithVerify(BaseBackupTest):
"""Test Case for backups."""
def setUp(self):
self.override_config(
"backup_driver",
"cinder.tests.unit.backup.fake_service_with_verify")
super(BackupTestCaseWithVerify, self).setUp()
def test_import_record_with_verify(self):
"""Test normal backup record import.
        Test that import succeeds when the driver implements verify.
"""
vol_size = 1
backup_id = uuid.uuid4()
export = self._create_exported_record_entry(
vol_size=vol_size, exported_id=backup_id)
imported_record = self._create_export_record_db_entry(
backup_id=backup_id)
backup_hosts = []
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class):
self.backup_mgr.import_record(self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual('available', backup['status'])
self.assertEqual(vol_size, backup['size'])
def test_import_record_with_verify_invalid_backup(self):
"""Test error handling.
Test error handling when attempting an import of a backup
record where the backup driver returns an exception.
"""
vol_size = 1
backup_id = uuid.uuid4()
export = self._create_exported_record_entry(
vol_size=vol_size, exported_id=backup_id)
imported_record = self._create_export_record_db_entry(
backup_id=backup_id)
backup_hosts = []
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class) as _mock_record_verify:
_mock_record_verify.side_effect = \
exception.InvalidBackup(reason='fake')
self.assertRaises(exception.InvalidBackup,
self.backup_mgr.import_record,
self.ctxt,
imported_record,
export['backup_service'],
export['backup_url'],
backup_hosts)
self.assertTrue(_mock_record_verify.called)
backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual('error', backup['status'])
def test_backup_reset_status_from_nonrestoring_to_available(
self):
vol_id = self._create_volume_db_entry(status='available',
size=1)
backup = self._create_backup_db_entry(status='error',
volume_id=vol_id)
with mock.patch.object(manager.BackupManager,
'_map_service_to_driver') as \
mock_map_service_to_driver:
mock_map_service_to_driver.return_value = \
fake_service.get_backup_driver(self.ctxt)
self.backup_mgr.reset_status(self.ctxt,
backup,
'available')
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
def test_backup_reset_status_to_available_invalid_backup(self):
volume = db.volume_create(self.ctxt, {'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = self._create_backup_db_entry(status='error',
volume_id=volume['id'])
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class) as \
_mock_record_verify:
_mock_record_verify.side_effect = \
exception.BackupVerifyUnsupportedDriver(reason='fake')
self.assertRaises(exception.BackupVerifyUnsupportedDriver,
self.backup_mgr.reset_status,
self.ctxt,
backup,
'available')
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('error', backup['status'])
def test_backup_reset_status_from_restoring_to_available(self):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = self._create_backup_db_entry(status='restoring',
volume_id=volume['id'])
self.backup_mgr.reset_status(self.ctxt, backup, 'available')
backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual('available', backup['status'])
def test_backup_reset_status_to_error(self):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = self._create_backup_db_entry(status='creating',
volume_id=volume['id'])
self.backup_mgr.reset_status(self.ctxt, backup, 'error')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual('error', backup['status'])
@ddt.ddt
class BackupAPITestCase(BaseBackupTest):
def setUp(self):
super(BackupAPITestCase, self).setUp()
self.api = api.API()
def test_get_all_wrong_all_tenants_value(self):
self.assertRaises(exception.InvalidParameterValue,
self.api.get_all, self.ctxt, {'all_tenants': 'bad'})
@mock.patch.object(objects, 'BackupList')
def test_get_all_no_all_tenants_value(self, mock_backuplist):
result = self.api.get_all(self.ctxt, {'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None,
None, None, None)
@mock.patch.object(objects, 'BackupList')
@ddt.data(False, 'false', '0', 0, 'no')
def test_get_all_false_value_all_tenants(
self, false_value, mock_backuplist):
result = self.api.get_all(self.ctxt, {'all_tenants': false_value,
'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None,
None, None, None)
@mock.patch.object(objects, 'BackupList')
@ddt.data(True, 'true', '1', 1, 'yes')
def test_get_all_true_value_all_tenants(
self, true_value, mock_backuplist):
result = self.api.get_all(self.ctxt, {'all_tenants': true_value,
'key': 'value'})
self.assertFalse(mock_backuplist.get_all_by_project.called)
self.assertEqual(mock_backuplist.get_all.return_value,
result)
mock_backuplist.get_all.assert_called_once_with(
self.ctxt, {'key': 'value'}, None, None, None, None, None)
@mock.patch.object(objects, 'BackupList')
def test_get_all_true_value_all_tenants_non_admin(self, mock_backuplist):
ctxt = context.RequestContext('fake', 'fake')
result = self.api.get_all(ctxt, {'all_tenants': '1',
'key': 'value'})
self.assertFalse(mock_backuplist.get_all.called)
self.assertEqual(mock_backuplist.get_all_by_project.return_value,
result)
mock_backuplist.get_all_by_project.assert_called_once_with(
ctxt, ctxt.project_id, {'key': 'value'}, None, None, None, None,
None)
|
OTWillems/GEO1005
|
refs/heads/master
|
TwisterSolutions/utility_functions.py
|
1
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
SpatialDecision
A QGIS plugin
 This is an SDSS template for the GEO1005 course
-------------------
begin : 2015-11-02
git sha : $Format:%H$
copyright : (C) 2015 by Jorge Gil, TU Delft
email : j.a.lopesgil@tudelft.nl
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from qgis.networkanalysis import *
from pyspatialite import dbapi2 as sqlite
import psycopg2 as pgsql
import numpy as np
import math
import os.path
try:
import networkx as nx
has_networkx = True
except ImportError, e:
has_networkx = False
#
# Layer functions
#
def getLegendLayers(iface, geom='all', provider='all'):
"""Return list of valid QgsVectorLayer in QgsLegendInterface, with specific geometry type and/or data provider"""
layers_list = []
for layer in iface.legendInterface().layers():
add_layer = False
if layer.isValid() and layer.type() == QgsMapLayer.VectorLayer:
            if layer.hasGeometryType() and (geom == 'all' or layer.geometryType() in geom):
                if provider == 'all' or layer.dataProvider().name() in provider:
add_layer = True
if add_layer:
layers_list.append(layer)
return layers_list
def getCanvasLayers(iface, geom='all', provider='all'):
"""Return list of valid QgsVectorLayer in QgsMapCanvas, with specific geometry type and/or data provider"""
layers_list = []
for layer in iface.mapCanvas().layers():
add_layer = False
if layer.isValid() and layer.type() == QgsMapLayer.VectorLayer:
            if layer.hasGeometryType() and (geom == 'all' or layer.geometryType() in geom):
                if provider == 'all' or layer.dataProvider().name() in provider:
add_layer = True
if add_layer:
layers_list.append(layer)
return layers_list
def getRegistryLayers(geom='all', provider='all'):
"""Return list of valid QgsVectorLayer in QgsMapLayerRegistry, with specific geometry type and/or data provider"""
layers_list = []
for layer in QgsMapLayerRegistry.instance().mapLayers().values():
add_layer = False
if layer.isValid() and layer.type() == QgsMapLayer.VectorLayer:
            if layer.hasGeometryType() and (geom == 'all' or layer.geometryType() in geom):
                if provider == 'all' or layer.dataProvider().name() in provider:
add_layer = True
if add_layer:
layers_list.append(layer)
return layers_list
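# Example usage (sketch): the three getters above only differ in where they look
# (legend, canvas or map layer registry). Assuming this runs inside QGIS with an
# `iface` QgisInterface object available (e.g. from the Python console):
def exampleGetPointLayerNames(iface):
    """Sketch: names of the point vector layers currently shown in the legend."""
    point_layers = getLegendLayers(iface, geom=[QGis.Point], provider='all')
    return [layer.name() for layer in point_layers]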
def isLayerProjected(layer):
projected = False
if layer:
projected = not layer.crs().geographicFlag()
return projected
def getLegendLayerByName(iface, name):
layer = None
for i in iface.legendInterface().layers():
if i.name() == name:
layer = i
return layer
def getCanvasLayerByName(iface, name):
layer = None
for i in iface.mapCanvas().layers():
if i.name() == name:
layer = i
return layer
def getLayersListNames(layerslist):
layer_names = [layer.name() for layer in layerslist]
return layer_names
def getLayerPath(layer):
path = ''
provider = layer.dataProvider()
provider_type = provider.name()
if provider_type == 'spatialite':
uri = QgsDataSourceURI(provider.dataSourceUri())
path = uri.database()
elif provider_type == 'ogr':
uri = provider.dataSourceUri()
path = os.path.dirname(uri)
return path
def reloadLayer(layer):
layer_name = layer.name()
layer_provider = layer.dataProvider().name()
new_layer = None
if layer_provider in ('spatialite','postgres'):
uri = QgsDataSourceURI(layer.dataProvider().dataSourceUri())
new_layer = QgsVectorLayer(uri.uri(), layer_name, layer_provider)
elif layer_provider == 'ogr':
uri = layer.dataProvider().dataSourceUri()
new_layer = QgsVectorLayer(uri.split("|")[0], layer_name, layer_provider)
QgsMapLayerRegistry.instance().removeMapLayer(layer.id())
if new_layer:
QgsMapLayerRegistry.instance().addMapLayer(new_layer)
return new_layer
#
# Field functions
#
def fieldExists(layer, name):
    return name in getFieldNames(layer)
def getFieldNames(layer):
fields_list = []
if layer and layer.dataProvider():
fields_list = [field.name() for field in layer.dataProvider().fields()]
return fields_list
def getNumericFields(layer, type='all'):
fields = []
if type == 'all':
types = (QtCore.QVariant.Int, QtCore.QVariant.LongLong, QtCore.QVariant.Double,
QtCore.QVariant.UInt, QtCore.QVariant.ULongLong)
else:
        types = (type,)
if layer and layer.dataProvider():
for field in layer.dataProvider().fields():
if field.type() in types:
fields.append(field)
return fields
def getFieldIndex(layer, name):
idx = layer.dataProvider().fields().indexFromName(name)
return idx
def fieldHasValues(layer, name):
if layer and fieldExists(layer, name):
# find fields that only have NULL values
idx = getFieldIndex(layer, name)
maxval = layer.maximumValue(idx)
minval = layer.minimumValue(idx)
if maxval == NULL and minval == NULL:
return False
else:
return True
def fieldHasNullValues(layer, name):
if layer and fieldExists(layer, name):
idx = getFieldIndex(layer, name)
vals = layer.uniqueValues(idx,1)
# depending on the provider list is empty or has NULL value in first position
if len(vals) == 0 or (len(vals) == 1 and vals[0] == NULL):
return True
else:
return False
def getFieldValues(layer, fieldname, null=True, selection=False):
attributes = []
ids = []
if fieldExists(layer, fieldname):
if selection:
features = layer.selectedFeatures()
else:
request = QgsFeatureRequest().setSubsetOfAttributes([getFieldIndex(layer, fieldname)])
features = layer.getFeatures(request)
if null:
for feature in features:
attributes.append(feature.attribute(fieldname))
ids.append(feature.id())
else:
for feature in features:
val = feature.attribute(fieldname)
if val != NULL:
attributes.append(val)
ids.append(feature.id())
return attributes, ids
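# Example usage (sketch): read the non-NULL values of one field from the current
# selection. The field name "population" is a placeholder for illustration only.
def exampleSelectedFieldValues(layer):
    values, ids = getFieldValues(layer, "population", null=False, selection=True)
    return dict(zip(ids, values))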
def addFields(layer, names, types):
res = False
if layer:
provider = layer.dataProvider()
caps = provider.capabilities()
if caps & QgsVectorDataProvider.AddAttributes:
fields = provider.fields()
for i, name in enumerate(names):
#add new field if it doesn't exist
if fields.indexFromName(name) == -1:
res = provider.addAttributes([QgsField(name, types[i])])
#apply changes if any made
if res:
layer.updateFields()
return res
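# Example usage (sketch): add an integer and a text attribute to a layer whose
# provider supports adding attributes; the field names are placeholders.
def exampleAddStandardFields(layer):
    return addFields(layer, ['class_id', 'label'],
                     [QtCore.QVariant.Int, QtCore.QVariant.String])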
#
# Feature functions
#
def getFeaturesByListValues(layer, name, values=list):
features = {}
if layer:
if fieldExists(layer, name):
request = QgsFeatureRequest().setSubsetOfAttributes([getFieldIndex(layer, name)])
iterator = layer.getFeatures(request)
for feature in iterator:
att = feature.attribute(name)
if att in values:
features[feature.id()] = att
return features
def getFeatureIdsByListValues(layer, name, values=list):
ids = []
if layer:
if fieldExists(layer, name):
request = QgsFeatureRequest().setSubsetOfAttributes([getFieldIndex(layer, name)])
iterator = layer.getFeatures(request)
for feature in iterator:
att = feature.attribute(name)
if att in values:
ids.append(feature.id())
return ids
def selectFeaturesByListValues(layer, name, values=list):
features = []
if layer:
if fieldExists(layer, name):
request = QgsFeatureRequest().setSubsetOfAttributes([getFieldIndex(layer, name)])
iterator = layer.getFeatures(request)
for feature in iterator:
att = feature.attribute(name)
if att in values:
features.append(feature.id())
layer.setSelectedFeatures(features)
def selectFeaturesFromTable(layer, fields, tup_list): # list of tuples
features = []
if layer:
for item in tup_list: # item = (timestamp,[[osmid,osmid]])
timestamp,osmids = item
if type(osmids) == list:
for osmid in osmids:
expression = "\"%s\"='%s' and \"%s\"='%s\'" % (fields[0],timestamp, fields[1], osmid)
request = QgsFeatureRequest().setFilterExpression(expression)
iterator = layer.getFeatures(request)
for feature in iterator:
features.append(feature.id())
else:
expression = "\"%s\"='%s' and \"%s\"='%s\'" % (fields[0],timestamp, fields[1], osmids)
request = QgsFeatureRequest().setFilterExpression(expression)
iterator = layer.getFeatures(request)
for feature in iterator:
features.append(feature.id())
layer.setSelectedFeatures(features)
def getFeatureIDFromTable(layer, fields, tup_list):
ids = []
if layer:
for item in tup_list: # item = (timestamp,[[osmid,osmid]])
timestamp,osmids = item
if type(osmids) == list:
for osmid in osmids:
expression = "\"%s\"='%s' and \"%s\"='%s\'" % (fields[0],timestamp, fields[1], osmid)
request = QgsFeatureRequest().setFilterExpression(expression)
iterator = layer.getFeatures(request)
for feature in iterator:
ids.append(feature.id())
else:
expression = "\"%s\"='%s' and \"%s\"='%s\'" % (fields[0],timestamp, fields[1], osmids)
request = QgsFeatureRequest().setFilterExpression(expression)
iterator = layer.getFeatures(request)
for feature in iterator:
ids.append(feature.id())
return ids
def getFeaturesByRangeValues(layer, name, min, max):
features = {}
if layer:
if fieldExists(layer, name):
request = QgsFeatureRequest().setSubsetOfAttributes([getFieldIndex(layer, name)])
iterator = layer.getFeatures(request)
for feature in iterator:
att = feature.attribute(name)
if min <= att <= max:
features[feature.id()] = att
return features
def selectFeaturesByRangeValues(layer, name, min, max):
features = []
if layer:
if fieldExists(layer, name):
request = QgsFeatureRequest().setSubsetOfAttributes([getFieldIndex(layer, name)])
iterator = layer.getFeatures(request)
for feature in iterator:
att = feature.attribute(name)
if min <= att <= max:
features.append(feature.id())
layer.setSelectedFeatures(features)
def selectFeaturesByExpression(layer, expression):
features = []
if layer:
request = QgsFeatureRequest().setFilterExpression(expression)
iterator = layer.getFeatures(request)
for feature in iterator:
features.append(feature.id())
layer.setSelectedFeatures(features)
def filterFeaturesByExpression(layer, expression):
success = False
if layer:
try:
success = layer.setSubsetString(expression)
except:
success = False
return success
def getFeaturesByExpression(layer, expression):
features = {}
if layer:
request = QgsFeatureRequest().setFilterExpression(expression)
iterator = layer.getFeatures(request)
features = {feature.id(): feature for feature in iterator}
return features
def getAllFeatures(layer):
allfeatures = {}
if layer:
features = layer.getFeatures()
allfeatures = {feature.id(): feature for feature in features}
return allfeatures
def getAllFeatureIds(layer):
ids = []
if layer:
features = layer.getFeatures()
ids = [feature.id() for feature in features]
return ids
def getFeatureIdsByExpression(layer, expression):
ids = []
if layer:
request = QgsFeatureRequest().setFilterExpression(expression)
iterator = layer.getFeatures(request)
ids = [feature.id() for feature in iterator]
return ids
def getAllFeatureSymbols(layer):
symbols = {}
if layer:
renderer = layer.rendererV2()
features = layer.getFeatures()
for feature in features:
symb = renderer.symbolsForFeature(feature)
if len(symb) > 0:
                symbols[feature.id()] = symb[0].color()
            else:
                symbols[feature.id()] = QColor(200,200,200,255)
return symbols
def getAllFeatureData(layer):
data = {}
symbols = {}
if layer:
renderer = layer.rendererV2()
features = layer.getFeatures()
for feature in features:
            data[feature.id()] = feature
            symb = renderer.symbolsForFeature(feature)
            if len(symb) > 0:
                symbols[feature.id()] = symb[0].color()
            else:
                symbols[feature.id()] = QColor(200,200,200,255)
return data, symbols
def getFeaturesByIntersection(base_layer, intersect_layer, crosses):
features = []
# retrieve objects to be intersected (list comprehension, more pythonic)
intersect_geom = [QgsGeometry(feat.geometry()) for feat in intersect_layer.getFeatures()]
# retrieve base layer objects
base = base_layer.getFeatures()
# should improve with spatial index for large data sets
#index = createIndex(base_layer)
# loop through base features and intersecting elements
# appends if intersecting, when crosses = True
# does the opposite if crosses = False
for feat in base:
append = not crosses
base_geom = QgsGeometry(feat.geometry())
for intersect in intersect_geom:
if base_geom.intersects(intersect):
append = crosses
break
if append:
features.append(feat)
return features
def getFeaturesIntersections(base_layer, intersect_layer):
intersections = []
# retrieve objects to be intersected (list comprehension, more pythonic)
obstacles_geom = [QgsGeometry(feat.geometry()) for feat in intersect_layer.getFeatures()]
# retrieve base layer objects
base = base_layer.getFeatures()
# loop through base features and intersecting elements
for feat in base:
base_geom = QgsGeometry(feat.geometry())
for obst in obstacles_geom:
if base_geom.intersects(obst):
intersections.append(base_geom.intersection(obst))
return intersections
def selectFeaturesByIntersection(base_layer, intersect_layer, crosses):
features = []
# retrieve objects to be intersected (list comprehension, more pythonic)
obstacles_geom = [QgsGeometry(feat.geometry()) for feat in intersect_layer.getFeatures()]
# retrieve base layer objects
base = base_layer.getFeatures()
# loop through base features and intersecting elements
for feat in base:
append = not crosses
base_geom = QgsGeometry(feat.geometry())
for obst in obstacles_geom:
if base_geom.intersects(obst):
append = crosses
break
if append:
features.append(feat.id())
base_layer.select(features)
#
# Canvas functions
#
# Display a message in the QGIS canvas
def showMessage(iface, msg, type='Info', lev=1, dur=2):
iface.messageBar().pushMessage(type,msg,level=lev,duration=dur)
def getCanvasColour(iface):
colour = iface.mapCanvas().canvasColor()
return colour
def printCanvas(iface, filename=''):
if not filename:
filename = 'print_map.pdf'
# image size parameters
imageWidth_mm = 10000
imageHeight_mm = 10000
dpi = 300
    map_settings = iface.mapCanvas().mapSettings()
c = QgsComposition(map_settings)
c.setPaperSize(imageWidth_mm, imageHeight_mm)
c.setPrintResolution(dpi)
x, y = 0, 0
w, h = c.paperWidth(), c.paperHeight()
composerMap = QgsComposerMap(c, x ,y, w, h)
composerMap.setBackgroundEnabled(True)
c.addItem(composerMap)
    c.setPlotStyle(QgsComposition.Print)
    # QImage cannot write PDF output, so export the composition directly
    c.exportAsPDF(filename)
#
# Network functions
#
def makeUndirectedGraph(network_layer, points=list):
graph = None
tied_points = []
if network_layer:
director = QgsLineVectorLayerDirector(network_layer, -1, '', '', '', 3)
properter = QgsDistanceArcProperter()
director.addProperter(properter)
builder = QgsGraphBuilder(network_layer.crs())
tied_points = director.makeGraph(builder, points)
graph = builder.graph()
return graph, tied_points
def makeDirectedGraph(network_layer, points=list, direction_field=-1, one_way='', reverse_way='', two_way='', default_direction=3):
graph = None
tied_points = []
if network_layer:
director = QgsLineVectorLayerDirector(network_layer, direction_field, one_way, reverse_way, two_way, default_direction)
properter = QgsDistanceArcProperter()
director.addProperter(properter)
builder = QgsGraphBuilder(network_layer.crs())
tied_points = director.makeGraph(builder, points)
graph = builder.graph()
return graph, tied_points
def calculateRouteTree(graph, tied_points, origin, destination, impedance=0):
points = []
if tied_points:
try:
from_point = tied_points[origin]
to_point = tied_points[destination]
except:
return points
# analyse graph
if graph:
form_id = graph.findVertex(from_point)
tree = QgsGraphAnalyzer.shortestTree(graph, form_id, impedance)
form_id = tree.findVertex(from_point)
to_id = tree.findVertex(to_point)
# iterate to get all points in route
if to_id == -1:
pass
else:
while form_id != to_id:
l = tree.vertex(to_id).inArc()
if len(l) == 0:
break
e = tree.arc(l[0])
points.insert(0, tree.vertex(e.inVertex()).point())
to_id = e.outVertex()
points.insert(0, from_point)
return points
def calculateRouteDijkstra(graph, tied_points, origin, destination, impedance=0):
cost = []
points = []
if tied_points:
try:
from_point = tied_points[origin]
to_point = tied_points[destination]
except:
return points
# analyse graph
if graph:
from_id = graph.findVertex(from_point)
to_id = graph.findVertex(to_point)
(tree, cost) = QgsGraphAnalyzer.dijkstra(graph, from_id, impedance)
if tree[to_id] == -1:
pass
else:
curPos = to_id
while curPos != from_id:
points.append(graph.vertex(graph.arc(tree[curPos]).inVertex()).point())
curPos = graph.arc(tree[curPos]).outVertex()
points.append(from_point)
points.reverse()
if cost:
return points,cost
else:
return points
def calculateServiceArea(mapCanvas, graph, tied_points, origin, cutoff, impedance=0):
points = {}
if tied_points:
try:
from_point = tied_points[origin]
except:
return points
# analyse graph
if graph:
from_id = graph.findVertex(from_point)
(tree, cost) = QgsGraphAnalyzer.dijkstra(graph, from_id, impedance)
i = 0
while i < len(cost):
if cost[i] > cutoff and tree[i] != -1:
outVertexId = graph.arc(tree[i]).outVertex()
if cost[outVertexId] < cutoff:
                        points[str(i)] = (graph.vertex(i).point(), cost[i])
i += 1
return points
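# Example usage (sketch): tie an origin and a destination point to a road network
# layer, build an undirected graph and run the Dijkstra helper above. Assumes
# `network_layer` is a line QgsVectorLayer and `origin`/`destination` are QgsPoint
# objects in the same CRS; drawRouteBand (defined further below) shows the result.
def exampleShortestRoute(iface, network_layer, origin, destination):
    graph, tied_points = makeUndirectedGraph(network_layer, [origin, destination])
    result = calculateRouteDijkstra(graph, tied_points, 0, 1)
    # calculateRouteDijkstra returns (points, cost) when a cost list was computed,
    # otherwise just a (possibly empty) list of points
    route_points = result[0] if isinstance(result, tuple) else result
    if route_points:
        drawRouteBand(iface.mapCanvas(), route_points)
    return route_points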
#
# General functions
#
def getLastDir(tool_name=''):
path = ''
settings = QtCore.QSettings(tool_name,"")
    path = settings.value("lastUsedDir", str(""))
return path
def setLastDir(filename, tool_name=''):
path = QtCore.QFileInfo(filename).absolutePath()
settings = QtCore.QSettings(tool_name,"")
settings.setValue("lastUsedDir", str(unicode(path)))
# check if a text string is of numeric type
def isNumeric(txt):
try:
int(txt)
return True
except ValueError:
try:
long(txt)
return True
except ValueError:
try:
float(txt)
return True
except ValueError:
return False
# convert a text string to a numeric value, if possible
def convertNumeric(txt):
try:
value = int(txt)
except ValueError:
try:
value = long(txt)
except ValueError:
try:
value = float(txt)
except ValueError:
value = ''
return value
def truncateNumber(num,digits=9):
if isNumeric(num):
truncated = str(num)
if '.' in truncated:
truncated = truncated[:digits]
truncated = truncated.rstrip('0').rstrip('.')
return convertNumeric(truncated)
# Function to create a spatial index for QgsVectorDataProvider
def createIndex(layer):
provider = layer.dataProvider()
caps = provider.capabilities()
if caps & QgsVectorDataProvider.CreateSpatialIndex:
feat = QgsFeature()
index = QgsSpatialIndex()
fit = provider.getFeatures()
while fit.nextFeature(feat):
index.insertFeature(feat)
return index
else:
return None
def drawRouteBand(canvas, points, colour='red', width=3):
# check QColor.colorNames() for valid colour names
rb = QgsRubberBand(canvas, False)
try:
rb.setColor(QtGui.QColor(colour))
except:
rb.setColor(QtCore.Qt.red)
rb.setWidth(width)
for pnt in points:
rb.addPoint(pnt)
rb.show()
#------------------------------
# General database functions
#------------------------------
def getDBLayerConnection(layer):
provider = layer.providerType()
uri = QgsDataSourceURI(layer.dataProvider().dataSourceUri())
if provider == 'spatialite':
path = uri.database()
connection_object = getSpatialiteConnection(path)
elif provider == 'postgres':
connection_object = pgsql.connect(uri.connectionInfo().encode('utf-8'))
else:
connection_object = None
return connection_object
def getSpatialiteConnection(path):
try:
connection=sqlite.connect(path)
except sqlite.OperationalError, error:
#pop_up_error("Unable to connect to selected database: \n %s" % error)
connection = None
return connection
def getDBLayerTableName(layer):
uri = QgsDataSourceURI(layer.dataProvider().dataSourceUri())
return uri.table()
def getDBLayerGeometryColumn(layer):
uri = QgsDataSourceURI(layer.dataProvider().dataSourceUri())
return uri.geometryColumn()
def getDBLayerPrimaryKey(layer):
uri = QgsDataSourceURI(layer.dataProvider().dataSourceUri())
return uri.key()
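# Example usage (sketch): open a DB-API connection for a spatialite or postgres
# layer and collect its table, geometry column and primary key; other providers
# yield a None connection. Assumes `layer` is a database-backed QgsVectorLayer.
def exampleDescribeDBLayer(layer):
    connection = getDBLayerConnection(layer)
    info = (getDBLayerTableName(layer),
            getDBLayerGeometryColumn(layer),
            getDBLayerPrimaryKey(layer))
    if connection:
        connection.close()
    return info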
#------------------------------
# Layer creation functions
#------------------------------
def createTempLayer(name, geometry, srid, attributes, types):
#geometry can be 'POINT', 'LINESTRING' or 'POLYGON' or the 'MULTI' version of the previous
vlayer = QgsVectorLayer('%s?crs=EPSG:%s'% (geometry, srid), name, "memory")
provider = vlayer.dataProvider()
#create the required fields
if attributes:
vlayer.startEditing()
fields = []
for i, att in enumerate(attributes):
fields.append(QgsField(att, types[i]))
# add the fields to the layer
try:
provider.addAttributes(fields)
except:
return None
vlayer.commitChanges()
return vlayer
def loadTempLayer(layer,shown=True):
QgsMapLayerRegistry.instance().addMapLayer(layer,shown)
def insertTempFeatures(layer, geometry, attributes):
provider = layer.dataProvider()
geometry_type = provider.geometryType()
for i, geom in enumerate(geometry):
fet = QgsFeature()
if geometry_type in (1, 4):
fet.setGeometry(QgsGeometry.fromPoint(geom))
elif geometry_type in (2, 5):
fet.setGeometry(QgsGeometry.fromPolyline(geom))
elif geometry_type in (3, 6):
fet.setGeometry(QgsGeometry.fromPolygon(geom))
if attributes:
fet.setAttributes(attributes[i])
provider.addFeatures([fet])
provider.updateExtents()
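# Example usage (sketch): create an in-memory point layer with two attributes,
# insert one feature and add it to the map. The EPSG code, field names and the
# coordinate pair are placeholders.
def exampleTempPointLayer():
    vlayer = createTempLayer('incidents', 'POINT', 4326,
                             ['name', 'priority'],
                             [QtCore.QVariant.String, QtCore.QVariant.Int])
    if vlayer:
        insertTempFeatures(vlayer, [QgsPoint(4.37, 52.01)], [['depot', 1]])
        loadTempLayer(vlayer)
    return vlayer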
def createTempLayerFull(name, srid, attributes, types, values, coords): # layername, CRS, Fieldnames, geomtype, att values, geom(pts
# create an instance of a memory vector layer
type = ''
if len(coords) == 2: type = 'Point'
elif len(coords) == 4: type = 'LineString'
vlayer = QgsVectorLayer('%s?crs=EPSG:%s'% (type, srid), name, "memory")
provider = vlayer.dataProvider()
#create the required fields
fields = []
for i, name in enumerate(attributes):
fields.append(QgsField(name, types[i]))
# add the fields to the layer
vlayer.startEditing()
try:
provider.addAttributes(fields)
except:
return None
# add features by iterating the values
features = []
for i, val in enumerate(values):
feat = QgsFeature()
# add geometry
try:
if type == 'Point':
                feat.setGeometry(QgsGeometry.fromPoint(QgsPoint(float(val[coords[0]]),float(val[coords[1]]))))
elif type == 'LineString':
feat.setGeometry(QgsGeometry.fromPolyline([QgsPoint(float(val[coords[0]]),float(val[coords[1]])),
QgsPoint(float(val[coords[2]]),float(val[coords[3]]))]))
except:
pass
# add attribute values
feat.setAttributes(list(val))
features.append(feat)
# add the features to the layer
try:
provider.addFeatures(features)
except:
return None
vlayer.commitChanges()
vlayer.updateExtents()
if not vlayer.isValid():
print "Layer failed to load!"
return None
return vlayer
#---------------------------------------------
# Shape file specific functions
#---------------------------------------------
def listShapeFolders():
# get folder name and path of open layers
res = dict()
res['idx'] = 0
res['name'] = []
res['path'] = []
layers = getRegistryLayers('all', 'ogr')
for layer in layers:
provider = layer.dataProvider()
if layer.storageType() == 'ESRI Shapefile':
path = os.path.dirname(layer.dataProvider().dataSourceUri())
try:
idx = res['path'].index(path)
except:
res['name'].append(os.path.basename(os.path.normpath(path))) #layer.name()
res['path'].append(path)
#for the file name: os.path.basename(uri).split('|')[0]
#case: no folders available
if len(res['name']) < 1:
res = None
#return the result even if empty
return res
def testShapeFileExists(path, name):
filename = path+"/"+name+".shp"
exists = os.path.isfile(filename)
return exists
def copyLayerToShapeFile(layer, path, name):
#Get layer provider
provider = layer.dataProvider()
filename = path+"/"+name+".shp"
fields = provider.fields()
if layer.hasGeometryType():
geometry = layer.wkbType()
else:
geometry = None
srid = layer.crs()
# create an instance of vector file writer, which will create the vector file.
writer = QgsVectorFileWriter(filename, "CP1250", fields, geometry, srid, "ESRI Shapefile")
if writer.hasError() != QgsVectorFileWriter.NoError:
print "Error when creating shapefile: ", writer.hasError()
return None
# add features by iterating the values
for feat in layer.getFeatures():
writer.addFeature(feat)
# delete the writer to flush features to disk
del writer
# open the newly created file
vlayer = QgsVectorLayer(filename, name, "ogr")
if not vlayer.isValid():
print "Layer failed to load!"
return None
return vlayer
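# Example usage (sketch): dump a layer to a shapefile in a given folder, skipping
# the copy when the target file already exists; path and name are placeholders.
def exampleExportLayerToShape(layer, path, name='export'):
    if testShapeFileExists(path, name):
        return None
    return copyLayerToShapeFile(layer, path, name)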
def createShapeFileLayer(path, name, srid, attributes, types, geometrytype):
# create new empty layer with given attributes
# todo: created table has no attributes. not used
# use createShapeFileFullLayer instead
filename = path+"/"+name+".shp"
#create the required fields
fields = QgsFields()
for i, attr in enumerate(attributes):
fields.append(QgsField(attr, types[i]))
# create an instance of vector file writer, which will create the vector file.
writer = None
if 'point' in geometrytype.lower():
writer = QgsVectorFileWriter(filename, "CP1250", fields, QGis.WKBPoint, srid, "ESRI Shapefile")
elif 'line' in geometrytype.lower():
writer = QgsVectorFileWriter(filename, "CP1250", fields, QGis.WKBLineString, srid, "ESRI Shapefile")
elif 'polygon' in geometrytype.lower():
writer = QgsVectorFileWriter(filename, "CP1250", fields, QGis.WKBPolygon, srid, "ESRI Shapefile")
if writer.hasError() != QgsVectorFileWriter.NoError:
print "Error when creating shapefile: ", writer.hasError()
return None
# delete the writer to flush features to disk (optional)
del writer
# open the newly created file
vlayer = QgsVectorLayer(filename, name, "ogr")
if not vlayer.isValid():
print "Layer failed to open!"
return None
return vlayer
def createShapeFileFullLayer(path, name, srid, attributes, types, values, coords):
# create new layer with given attributes and data, including geometry (point and lines only)
filename = path+"/"+name+".shp"
#create the required fields
fields = QgsFields()
for i, attr in enumerate(attributes):
fields.append(QgsField(attr, types[i]))
# create an instance of vector file writer, which will create the vector file.
writer = None
if len(coords) == 2:
type = 'point'
writer = QgsVectorFileWriter(filename, "CP1250", fields, QGis.WKBPoint, srid, "ESRI Shapefile")
elif len(coords) == 4:
type = 'line'
writer = QgsVectorFileWriter(filename, "CP1250", fields, QGis.WKBLineString, srid, "ESRI Shapefile")
if writer.hasError() != QgsVectorFileWriter.NoError:
print "Error when creating shapefile: ", writer.hasError()
return None
# add features by iterating the values
feat = QgsFeature()
for i, val in enumerate(values):
# add geometry
try:
if type == 'point':
                feat.setGeometry(QgsGeometry.fromPoint(QgsPoint(float(val[coords[0]]),float(val[coords[1]]))))
elif type == 'line':
feat.setGeometry(QgsGeometry.fromPolyline([QgsPoint(float(val[coords[0]]),float(val[coords[1]])),
QgsPoint(float(val[coords[2]]),float(val[coords[3]]))]))
except: pass
# add attributes
attrs = []
for j, attr in enumerate(attributes):
attrs.append(val[j])
feat.setAttributes(attrs)
writer.addFeature(feat)
# delete the writer to flush features to disk (optional)
del writer
# open the newly created file
vlayer = QgsVectorLayer(filename, name, "ogr")
if not vlayer.isValid():
print "Layer failed to load!"
return None
return vlayer
def addShapeFileAttributes(layer, attributes, types, values):
# add attributes to an existing layer
attributes_pos = dict()
res = False
if layer:
provider = layer.dataProvider()
caps = provider.capabilities()
res = False
if caps & QgsVectorDataProvider.AddAttributes:
fields = provider.fields()
count = fields.count()
for i, name in enumerate(attributes):
#add new field if it doesn't exist
if fields.indexFromName(name) == -1:
res = provider.addAttributes([QgsField(name, types[i])])
# keep position of attributes that are added, since name can change
attributes_pos[i] = count
count += 1
#apply changes if any made
if res:
layer.updateFields()
# update attribute values by iterating the layer's features
res = False
if caps & QgsVectorDataProvider.ChangeAttributeValues:
#fields = provider.fields() #the fields must be retrieved again after the updateFields() method
iter = layer.getFeatures()
for i, feature in enumerate(iter):
fid = feature.id()
#to update the features the attribute/value pairs must be converted to a dictionary for each feature
attrs = {}
for j in attributes_pos.iterkeys():
field_id = attributes_pos[j]
val = values[i][j]
attrs.update({field_id: val})
#update the layer with the corresponding dictionary
res = provider.changeAttributeValues({fid: attrs})
#apply changes if any made
if res:
layer.updateFields()
return res
def addShapeFileFeature(layer, geometry, attribute):
# attribute is a tuples/lists (col1, col2, col3)
# add geometryto featureclass
feat = QgsFeature()
feat.setGeometry(geometry)
# add attributes to featureclass
feat.setAttributes(attribute)
layer.addFeature(feat)
def addShapeFileFeatures(layer, geometries, attributes):
provider = layer.dataProvider()
geometry_type = provider.geometryType()
for i, geom in enumerate(geometries):
feat = QgsFeature()
if geometry_type in (1, 4):
feat.setGeometry(QgsGeometry.fromPoint(geom))
elif geometry_type in (2, 5):
feat.setGeometry(QgsGeometry.fromPolyline(geom))
elif geometry_type in (3, 6):
feat.setGeometry(QgsGeometry.fromPolygon(geom))
if attributes:
feat.setAttributes(attributes[i])
provider.addFeatures([feat])
provider.updateExtents()
def deleteAllFeatures(layer):
fids = getAllFeatureIds(layer)
layer.dataProvider().deleteFeatures(fids)
|
dsbrown/FreeCAD
|
refs/heads/master
|
src/Mod/TemplatePyMod/Commands.py
|
18
|
# FreeCAD TemplatePyMod module
# (c) 2007 Juergen Riegel LGPL
#
# import FreeCAD modules
import FreeCAD, FreeCADGui,inspect
# helper -------------------------------------------------------------------
def addCommand(name,cmdObject):
(list,num) = inspect.getsourcelines(cmdObject.Activated)
pos = 0
# check for indentation
while(list[1][pos] == ' ' or list[1][pos] == '\t'):
pos += 1
source = ""
for i in range(len(list)-1):
source += list[i+1][pos:]
FreeCADGui.addCommand(name,cmdObject,source)
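# Example (sketch): the command classes below are meant to be registered through
# this helper, which skips the "def" line of Activated(), dedents the remaining
# body and hands it to FreeCADGui as the command's Python source, e.g.:
#   addCommand('TemplatePyMod_Cmd1', TemplatePyMod_Cmd1())
# (shown commented out; the actual registration is assumed to happen elsewhere
# in this module.)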
#---------------------------------------------------------------------------
# The command classes
#---------------------------------------------------------------------------
class TemplatePyMod_Cmd1:
"Example command class"
def Activated(self):
print "TemplatePyMod_Cmd1 activated ;-) "
def GetResources(self):
        return {'Pixmap' : 'Std_Tool1', 'MenuText': 'Example command', 'ToolTip': 'Very unimportant example command'}
class TemplatePyMod_Cmd2:
"Example command class"
def Activated(self):
d = FreeCAD.ActiveDocument
v = FreeCADGui.ActiveDocument.ActiveView
class PolygonCreator:
def __init__(self, doc, view, max):
self.doc = doc
self.view = view
self.call = view.addEventCallback("SoMouseButtonEvent",self.addVertex)
self.max = max
self.node=[]
self.count=0
self.poly=None
def addVertex(self, d):
if (d["State"] == "DOWN"):
pos = d["Position"]
self.node.append(self.view.getPoint(pos[0],pos[1]))
self.count = self.count+1
if (self.count == 1):
import Part,PartGui
self.poly=self.doc.addObject("Part::Polygon","Polygon")
self.poly.Nodes = self.node
self.poly.Close=True
else:
self.poly.Nodes = self.node
self.doc.recompute()
if (self.count == self.max):
self.node=[]
self.view.removeEventCallback("SoMouseButtonEvent",self.call)
p=PolygonCreator(d,v,10)
def IsActive(self):
        return FreeCAD.ActiveDocument is not None
def GetResources(self):
return {'Pixmap' : 'Std_Tool2', 'MenuText': 'Create polygon...', 'ToolTip': 'Create a polygon by clicking inside the viewer'}
class TemplatePyMod_Cmd3:
"Import PySide"
def Activated(self):
import PythonQt
from PySide import QtGui
mw=FreeCADGui.getMainWindow()
QtGui.QMessageBox.information(mw,"PySide","""PySide was loaded successfully.""")
FreeCADGui.activateWorkbench("PythonQtWorkbench")
def GetResources(self):
return {'Pixmap' : 'python', 'MenuText': 'Import PySide', 'ToolTip': 'Add a workbench for PySide samples'}
class SphereCreator:
def __init__(self):
import Part
self.pt = Part
self.mode = False
FreeCAD.Console.PrintMessage("Create instance of SphereCreator\n")
def __del__(self):
FreeCAD.Console.PrintMessage("Delete instance of SphereCreator\n")
def enter(self):
if (self.mode):
return
FreeCAD.Console.PrintMessage("Enter sphere creation mode\n")
self.av = FreeCADGui.ActiveDocument.ActiveView
self.cb = self.av.addEventCallback("SoMouseButtonEvent",self.create)
self.ex = self.av.addEventCallback("SoKeyboardEvent",self.exit)
self.mode = True
def leave(self):
if (not self.mode):
return
FreeCAD.Console.PrintMessage("Leave sphere creation mode\n")
self.av.removeEventCallback("SoMouseButtonEvent",self.cb)
self.av.removeEventCallback("SoKeyboardEvent",self.ex)
self.mode = False
def create(self, info):
down = (info["State"] == "DOWN")
pos = info["Position"]
if (down):
pnt = self.av.getPoint(pos[0],pos[1])
FreeCAD.Console.PrintMessage("Clicked on position: ("+str(pos[0])+", "+str(pos[0])+")")
msg = " -> (%f,%f,%f)\n" % (pnt.x, pnt.y, pnt.z)
FreeCAD.Console.PrintMessage(msg)
sph=self.pt.makeSphere(1.0, pnt)
self.pt.show(sph)
def exit(self, info):
esc = (info["Key"] == "ESCAPE")
if (esc):
self.leave()
class TemplatePyMod_Cmd4:
def __init__(self):
self.sc = SphereCreator()
def __del__(self):
FreeCAD.Console.PrintError('TemplatePyMod_Cmd4 was destroyed\n')
def Activated(self):
        if FreeCADGui.ActiveDocument is not None:
self.sc.enter()
else:
FreeCAD.Console.PrintWarning('A 3d view must be created\n')
def GetResources(self):
return {'Pixmap' : 'python', 'MenuText': 'Create spheres...', 'ToolTip': 'Click on the screen to create a sphere'}
myRenderArea = None
class TemplatePyMod_Cmd5:
"Example command class"
def Activated(self):
from pivy import sogui
from pivy import coin
global myRenderArea
        if myRenderArea is None:
root = coin.SoSeparator()
myCamera = coin.SoPerspectiveCamera()
myMaterial = coin.SoMaterial()
root.addChild(myCamera)
root.addChild(coin.SoDirectionalLight())
#myMaterial.diffuseColor = (1.0, 0.0, 0.0) # Red
root.addChild(myMaterial)
root.addChild(coin.SoCone())
# Create a renderArea in which to see our scene graph.
# The render area will appear within the main window.
myRenderArea = sogui.SoGuiRenderArea(FreeCADGui.getMainWindow())
# Make myCamera see everything.
myCamera.viewAll(root, myRenderArea.getViewportRegion())
# Put our scene in myRenderArea, change the title
myRenderArea.setSceneGraph(root)
myRenderArea.setTitle("Hello Cone")
myRenderArea.show()
def GetResources(self):
return {'Pixmap' : 'Std_Tool1', 'MenuText': 'Render area', 'ToolTip': 'Show render area'}
class TemplatePyMod_Cmd6:
def Activated(self):
import FeaturePython
FeaturePython.makeBox()
def GetResources(self):
return {'Pixmap' : 'python', 'MenuText': 'Create a box', 'ToolTip': 'Use Box feature class which is completely written in Python'}
class TemplatePyGrp_1:
def Activated(self):
import FreeCAD
FreeCAD.Console.PrintMessage("TemplatePyGrp_1\n")
def GetResources(self):
return {'Pixmap' : 'Part_JoinConnect', 'MenuText': 'TemplatePyGrp_1', 'ToolTip': 'Print a message'}
class TemplatePyGrp_2:
def Activated(self):
import FreeCAD
FreeCAD.Console.PrintMessage("TemplatePyGrp_2\n")
def GetResources(self):
return {'Pixmap' : 'Part_JoinEmbed', 'MenuText': 'TemplatePyGrp_2', 'ToolTip': 'Print a message'}
class TemplatePyGrp_3:
def Activated(self):
import FreeCAD
FreeCAD.Console.PrintMessage("TemplatePyGrp_3\n")
def GetResources(self):
return {'Pixmap' : 'Part_JoinCutout', 'MenuText': 'TemplatePyGrp_3', 'ToolTip': 'Print a message'}
class TemplatePyGroup:
"Example group command class"
#def Activated(self, index):
# print "TemplatePyGroup activated ;-) "
def GetCommands(self):
return ("TemplatePyGrp_1", "TemplatePyGrp_2", "TemplatePyGrp_3", "Std_New")
def GetDefaultCommand(self):
return 2
def GetResources(self):
return {'Pixmap' : 'python', 'MenuText': 'Group command', 'ToolTip': 'Example group command'}
class TemplatePyCheckable:
"Example toggle command class"
def Activated(self, index):
if index == 0:
print "Toggle is off"
else:
print "Toggle is on"
def GetResources(self):
return {'Pixmap' : 'python', 'MenuText': 'Toggle command', 'ToolTip': 'Example toggle command', 'Checkable': True}
#---------------------------------------------------------------------------
# Adds the commands to the FreeCAD command manager
#---------------------------------------------------------------------------
addCommand('TemplatePyMod_Cmd1',TemplatePyMod_Cmd1())
addCommand('TemplatePyMod_Cmd2',TemplatePyMod_Cmd2())
addCommand('TemplatePyMod_Cmd3',TemplatePyMod_Cmd3())
FreeCADGui.addCommand('TemplatePyMod_Cmd4',TemplatePyMod_Cmd4())
FreeCADGui.addCommand('TemplatePyMod_Cmd5',TemplatePyMod_Cmd5())
FreeCADGui.addCommand('TemplatePyMod_Cmd6',TemplatePyMod_Cmd6())
FreeCADGui.addCommand('TemplatePyGrp_1',TemplatePyGrp_1())
FreeCADGui.addCommand('TemplatePyGrp_2',TemplatePyGrp_2())
FreeCADGui.addCommand('TemplatePyGrp_3',TemplatePyGrp_3())
FreeCADGui.addCommand('TemplatePyGroup',TemplatePyGroup())
FreeCADGui.addCommand('TemplatePyCheckable',TemplatePyCheckable())
|
rkmaddox/mne-python
|
refs/heads/master
|
mne/label.py
|
4
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
from collections import defaultdict
from colorsys import hsv_to_rgb, rgb_to_hsv
import copy as cp
import os
import os.path as op
import re
import numpy as np
from .morph_map import read_morph_map
from .parallel import parallel_func, check_n_jobs
from .source_estimate import (SourceEstimate, VolSourceEstimate,
_center_of_mass, extract_label_time_course,
spatial_src_adjacency)
from .source_space import (add_source_space_distances, SourceSpaces,
read_freesurfer_lut, _import_nibabel)
from .stats.cluster_level import _find_clusters, _get_components
from .surface import read_surface, fast_cross_3d, mesh_edges, mesh_dist
from .transforms import apply_trans
from .utils import (get_subjects_dir, _check_subject, logger, verbose, warn,
check_random_state, _validate_type, fill_doc,
_check_option, check_version)
def _blend_colors(color_1, color_2):
"""Blend two colors in HSV space.
Parameters
----------
color_1, color_2 : None | tuple
RGBA tuples with values between 0 and 1. None if no color is available.
If both colors are None, the output is None. If only one is None, the
output is the other color.
Returns
-------
color : None | tuple
RGBA tuple of the combined color. Saturation, value and alpha are
averaged, whereas the new hue is determined as angle half way between
the two input colors' hues.
"""
if color_1 is None and color_2 is None:
return None
elif color_1 is None:
return color_2
elif color_2 is None:
return color_1
r_1, g_1, b_1, a_1 = color_1
h_1, s_1, v_1 = rgb_to_hsv(r_1, g_1, b_1)
r_2, g_2, b_2, a_2 = color_2
h_2, s_2, v_2 = rgb_to_hsv(r_2, g_2, b_2)
hue_diff = abs(h_1 - h_2)
if hue_diff < 0.5:
h = min(h_1, h_2) + hue_diff / 2.
else:
h = max(h_1, h_2) + (1. - hue_diff) / 2.
h %= 1.
s = (s_1 + s_2) / 2.
v = (v_1 + v_2) / 2.
r, g, b = hsv_to_rgb(h, s, v)
a = (a_1 + a_2) / 2.
color = (r, g, b, a)
return color
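# Worked example for the helper above (values chosen for this comment only):
# blending opaque red (1, 0, 0, 1) with opaque blue (0, 0, 1, 1) averages
# saturation, value and alpha and takes the hue halfway between the inputs,
# which yields magenta:
#   _blend_colors((1., 0., 0., 1.), (0., 0., 1., 1.))  # -> (1.0, 0.0, 1.0, 1.0)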
def _split_colors(color, n):
"""Create n colors in HSV space that occupy a gradient in value.
Parameters
----------
color : tuple
RGBA tuple with values between 0 and 1.
n : int >= 2
Number of colors on the gradient.
Returns
-------
colors : tuple of tuples, len = n
N RGBA tuples that occupy a gradient in value (low to high) but share
saturation and hue with the input color.
"""
r, g, b, a = color
h, s, v = rgb_to_hsv(r, g, b)
gradient_range = np.sqrt(n / 10.)
if v > 0.5:
v_max = min(0.95, v + gradient_range / 2)
v_min = max(0.05, v_max - gradient_range)
else:
v_min = max(0.05, v - gradient_range / 2)
v_max = min(0.95, v_min + gradient_range)
hsv_colors = ((h, s, v_) for v_ in np.linspace(v_min, v_max, n))
rgb_colors = (hsv_to_rgb(h_, s_, v_) for h_, s_, v_ in hsv_colors)
rgba_colors = ((r_, g_, b_, a,) for r_, g_, b_ in rgb_colors)
return tuple(rgba_colors)
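# Illustrative example (inputs chosen for this comment only): splitting opaque
# red into three parts keeps hue and saturation and spreads the value channel
# over a gradient whose width scales with sqrt(n / 10):
#   _split_colors((1., 0., 0., 1.), 3)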
def _n_colors(n, bytes_=False, cmap='hsv'):
"""Produce a list of n unique RGBA color tuples based on a colormap.
Parameters
----------
n : int
Number of colors.
bytes : bool
Return colors as integers values between 0 and 255 (instead of floats
between 0 and 1).
cmap : str
Which colormap to use.
Returns
-------
colors : array, shape (n, 4)
RGBA color values.
"""
n_max = 2 ** 10
if n > n_max:
raise NotImplementedError("Can't produce more than %i unique "
"colors" % n_max)
from matplotlib.cm import get_cmap
cm = get_cmap(cmap, n_max)
pos = np.linspace(0, 1, n, False)
colors = cm(pos, bytes=bytes_)
if bytes_:
# make sure colors are unique
for ii, c in enumerate(colors):
if np.any(np.all(colors[:ii] == c, 1)):
raise RuntimeError('Could not get %d unique colors from %s '
'colormap. Try using a different colormap.'
% (n, cmap))
return colors
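# Illustrative example (for this comment only): eight distinct RGBA tuples
# sampled evenly from matplotlib's 'hsv' colormap:
#   colors = _n_colors(8)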
@fill_doc
class Label(object):
"""A FreeSurfer/MNE label with vertices restricted to one hemisphere.
Labels can be combined with the ``+`` operator:
* Duplicate vertices are removed.
* If duplicate vertices have conflicting position values, an error
is raised.
* Values of duplicate vertices are summed.
Parameters
----------
vertices : array, shape (N,)
Vertex indices (0 based).
pos : array, shape (N, 3) | None
Locations in meters. If None, then zeros are used.
values : array, shape (N,) | None
Values at the vertices. If None, then ones are used.
hemi : 'lh' | 'rh'
Hemisphere to which the label applies.
comment : str
Kept as information but not used by the object itself.
name : str
Kept as information but not used by the object itself.
filename : str
Kept as information but not used by the object itself.
subject : str | None
Name of the subject the label is from.
color : None | matplotlib color
Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
%(verbose)s
Attributes
----------
color : None | tuple
Default label color, represented as RGBA tuple with values between 0
and 1.
comment : str
Comment from the first line of the label file.
hemi : 'lh' | 'rh'
Hemisphere.
name : None | str
A name for the label. It is OK to change that attribute manually.
pos : array, shape (N, 3)
Locations in meters.
subject : str | None
Subject name. It is best practice to set this to the proper
value on initialization, but it can also be set manually.
values : array, shape (N,)
Values at the vertices.
%(verbose)s
vertices : array, shape (N,)
Vertex indices (0 based)
"""
@verbose
def __init__(self, vertices=(), pos=None, values=None, hemi=None,
comment="", name=None, filename=None, subject=None,
color=None, verbose=None): # noqa: D102
# check parameters
if not isinstance(hemi, str):
raise ValueError('hemi must be a string, not %s' % type(hemi))
vertices = np.asarray(vertices, int)
if np.any(np.diff(vertices.astype(int)) <= 0):
raise ValueError('Vertices must be ordered in increasing order.')
if color is not None:
from matplotlib.colors import colorConverter
color = colorConverter.to_rgba(color)
if values is None:
values = np.ones(len(vertices))
else:
values = np.asarray(values)
if pos is None:
pos = np.zeros((len(vertices), 3))
else:
pos = np.asarray(pos)
if not (len(vertices) == len(values) == len(pos)):
raise ValueError("vertices, values and pos need to have same "
"length (number of vertices)")
# name
if name is None and filename is not None:
name = op.basename(filename[:-6])
self.vertices = vertices
self.pos = pos
self.values = values
self.hemi = hemi
self.comment = comment
self.verbose = verbose
self.subject = _check_subject(None, subject, raise_error=False)
self.color = color
self.name = name
self.filename = filename
def __setstate__(self, state): # noqa: D105
self.vertices = state['vertices']
self.pos = state['pos']
self.values = state['values']
self.hemi = state['hemi']
self.comment = state['comment']
self.verbose = state['verbose']
self.subject = state.get('subject', None)
self.color = state.get('color', None)
self.name = state['name']
self.filename = state['filename']
def __getstate__(self): # noqa: D105
out = dict(vertices=self.vertices,
pos=self.pos,
values=self.values,
hemi=self.hemi,
comment=self.comment,
verbose=self.verbose,
subject=self.subject,
color=self.color,
name=self.name,
filename=self.filename)
return out
def __repr__(self): # noqa: D105
name = 'unknown, ' if self.subject is None else self.subject + ', '
name += repr(self.name) if self.name is not None else "unnamed"
n_vert = len(self)
return "<Label | %s, %s : %i vertices>" % (name, self.hemi, n_vert)
def __len__(self):
"""Return the number of vertices.
Returns
-------
n_vertices : int
The number of vertices.
"""
return len(self.vertices)
def __add__(self, other):
"""Add Labels."""
_validate_type(other, (Label, BiHemiLabel), 'other')
if isinstance(other, BiHemiLabel):
return other + self
else: # isinstance(other, Label)
if self.subject != other.subject:
raise ValueError('Label subject parameters must match, got '
'"%s" and "%s". Consider setting the '
'subject parameter on initialization, or '
'setting label.subject manually before '
'combining labels.' % (self.subject,
other.subject))
if self.hemi != other.hemi:
name = '%s + %s' % (self.name, other.name)
if self.hemi == 'lh':
lh, rh = self.copy(), other.copy()
else:
lh, rh = other.copy(), self.copy()
color = _blend_colors(self.color, other.color)
return BiHemiLabel(lh, rh, name, color)
# check for overlap
duplicates = np.intersect1d(self.vertices, other.vertices)
n_dup = len(duplicates)
if n_dup:
self_dup = [np.where(self.vertices == d)[0][0]
for d in duplicates]
other_dup = [np.where(other.vertices == d)[0][0]
for d in duplicates]
if not np.all(self.pos[self_dup] == other.pos[other_dup]):
err = ("Labels %r and %r: vertices overlap but differ in "
"position values" % (self.name, other.name))
raise ValueError(err)
isnew = np.array([v not in duplicates for v in other.vertices])
vertices = np.hstack((self.vertices, other.vertices[isnew]))
pos = np.vstack((self.pos, other.pos[isnew]))
# find position of other's vertices in new array
tgt_idx = [np.where(vertices == v)[0][0] for v in other.vertices]
n_self = len(self.values)
n_other = len(other.values)
new_len = n_self + n_other - n_dup
values = np.zeros(new_len, dtype=self.values.dtype)
values[:n_self] += self.values
values[tgt_idx] += other.values
else:
vertices = np.hstack((self.vertices, other.vertices))
pos = np.vstack((self.pos, other.pos))
values = np.hstack((self.values, other.values))
indcs = np.argsort(vertices)
vertices, pos, values = vertices[indcs], pos[indcs, :], values[indcs]
comment = "%s + %s" % (self.comment, other.comment)
name0 = self.name if self.name else 'unnamed'
name1 = other.name if other.name else 'unnamed'
name = "%s + %s" % (name0, name1)
color = _blend_colors(self.color, other.color)
verbose = self.verbose or other.verbose
label = Label(vertices, pos, values, self.hemi, comment, name, None,
self.subject, color, verbose)
return label
def __sub__(self, other):
"""Subtract Labels."""
_validate_type(other, (Label, BiHemiLabel), 'other')
if isinstance(other, BiHemiLabel):
if self.hemi == 'lh':
return self - other.lh
else:
return self - other.rh
else: # isinstance(other, Label):
if self.subject != other.subject:
raise ValueError('Label subject parameters must match, got '
'"%s" and "%s". Consider setting the '
'subject parameter on initialization, or '
'setting label.subject manually before '
'combining labels.' % (self.subject,
other.subject))
if self.hemi == other.hemi:
keep = np.in1d(self.vertices, other.vertices, True, invert=True)
else:
keep = np.arange(len(self.vertices))
name = "%s - %s" % (self.name or 'unnamed', other.name or 'unnamed')
return Label(self.vertices[keep], self.pos[keep], self.values[keep],
self.hemi, self.comment, name, None, self.subject,
self.color, self.verbose)
def save(self, filename):
r"""Write to disk as FreeSurfer \*.label file.
Parameters
----------
filename : str
Path to label file to produce.
Notes
-----
Note that due to file specification limitations, the Label's subject
and color attributes are not saved to disk.
"""
write_label(filename, self)
def copy(self):
"""Copy the label instance.
Returns
-------
label : instance of Label
The copied label.
"""
return cp.deepcopy(self)
def fill(self, src, name=None):
"""Fill the surface between sources for a source space label.
Parameters
----------
src : SourceSpaces
Source space in which the label was defined. If a source space is
provided, the label is expanded to fill in surface vertices that
lie between the vertices included in the source space. For the
added vertices, ``pos`` is filled in with positions from the
source space, and ``values`` is filled in from the closest source
space vertex.
name : None | str
Name for the new Label (default is self.name).
Returns
-------
label : Label
The label covering the same vertices in source space but also
including intermediate surface vertices.
See Also
--------
Label.restrict
Label.smooth
"""
# find source space patch info
if len(self.vertices) == 0:
return self.copy()
hemi_src = _get_label_src(self, src)
if not np.all(np.in1d(self.vertices, hemi_src['vertno'])):
msg = "Source space does not contain all of the label's vertices"
raise ValueError(msg)
if hemi_src['nearest'] is None:
warn("Source space is being modified in place because patch "
"information is needed. To avoid this in the future, run "
"mne.add_source_space_distances() on the source space "
"and save it to disk.")
if check_version('scipy', '1.3'):
dist_limit = 0
else:
warn('SciPy < 1.3 detected, adding source space patch '
'information will be slower. Consider upgrading SciPy.')
dist_limit = np.inf
add_source_space_distances(src, dist_limit=dist_limit)
nearest = hemi_src['nearest']
# find new vertices
include = np.in1d(nearest, self.vertices, False)
vertices = np.nonzero(include)[0]
# values
nearest_in_label = np.digitize(nearest[vertices], self.vertices, True)
values = self.values[nearest_in_label]
# pos
pos = hemi_src['rr'][vertices]
name = self.name if name is None else name
label = Label(vertices, pos, values, self.hemi, self.comment, name,
None, self.subject, self.color)
return label
def restrict(self, src, name=None):
"""Restrict a label to a source space.
Parameters
----------
src : instance of SourceSpaces
The source spaces to use to restrict the label.
name : None | str
Name for the new Label (default is self.name).
Returns
-------
label : instance of Label
The Label restricted to the set of source space vertices.
See Also
--------
Label.fill
Notes
-----
.. versionadded:: 0.20
"""
if len(self.vertices) == 0:
return self.copy()
hemi_src = _get_label_src(self, src)
mask = np.in1d(self.vertices, hemi_src['vertno'])
name = self.name if name is None else name
label = Label(self.vertices[mask], self.pos[mask], self.values[mask],
self.hemi, self.comment, name, None, self.subject,
self.color)
return label
@verbose
def smooth(self, subject=None, smooth=2, grade=None,
subjects_dir=None, n_jobs=1, verbose=None):
"""Smooth the label.
Useful for filling in labels made in a
decimated source space for display.
Parameters
----------
subject : str | None
The name of the subject used. If None, the value will be
taken from self.subject.
smooth : int
Number of iterations for the smoothing of the surface data.
Cannot be None here since not all vertices are used. For a
grade of 5 (e.g., fsaverage), a smoothing of 2 will fill a
label.
grade : int, list of shape (2,), array, or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
values will be morphed to the set of vertices specified in grade[0]
and grade[1], assuming that these are vertices for the left and
right hemispheres. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. If one array is used, it is assumed
that all vertices belong to the hemisphere of the label. To create
a label filling the surface, use None.
%(subjects_dir)s
%(n_jobs)s
%(verbose_meth)s
Returns
-------
label : instance of Label
The smoothed label.
Notes
-----
This function will set label.pos to be all zeros. If the positions
on the new surface are required, consider using mne.read_surface
with ``label.vertices``.
"""
subject = _check_subject(self.subject, subject)
return self.morph(subject, subject, smooth, grade, subjects_dir,
n_jobs, verbose)
@verbose
def morph(self, subject_from=None, subject_to=None, smooth=5, grade=None,
subjects_dir=None, n_jobs=1, verbose=None):
"""Morph the label.
Useful for transforming a label from one subject to another.
Parameters
----------
subject_from : str | None
The name of the subject of the current label. If None, the
initial subject will be taken from self.subject.
subject_to : str
The name of the subject to morph the label to. This will
be put in label.subject of the output label file.
smooth : int
Number of iterations for the smoothing of the surface data.
Cannot be None here since not all vertices are used.
grade : int, list of shape (2,), array, or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
values will be morphed to the set of vertices specified in grade[0]
and grade[1], assuming that these are vertices for the left and
right hemispheres. Note that specifying the vertices (e.g.,
``grade=[np.arange(10242), np.arange(10242)]`` for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. If one array is used, it is assumed
that all vertices belong to the hemisphere of the label. To create
a label filling the surface, use None.
%(subjects_dir)s
%(n_jobs)s
%(verbose_meth)s
Returns
-------
label : instance of Label
The morphed label.
See Also
--------
mne.morph_labels : Morph a set of labels.
Notes
-----
This function will set label.pos to be all zeros. If the positions
on the new surface are required, consider using `mne.read_surface`
with ``label.vertices``.
"""
from .morph import compute_source_morph, grade_to_vertices
subject_from = _check_subject(self.subject, subject_from)
if not isinstance(subject_to, str):
raise TypeError('"subject_to" must be entered as a string')
if not isinstance(smooth, int):
raise TypeError('smooth must be an integer')
if np.all(self.values == 0):
raise ValueError('Morphing label with all zero values will result '
'in the label having no vertices. Consider using '
'something like label.values.fill(1.0).')
idx = 0 if self.hemi == 'lh' else 1
if isinstance(grade, np.ndarray):
grade_ = [np.array([], int)] * 2
grade_[idx] = grade
grade = grade_
del grade_
grade = grade_to_vertices(subject_to, grade, subjects_dir=subjects_dir)
spacing = [np.array([], int)] * 2
spacing[idx] = grade[idx]
vertices = [np.array([], int)] * 2
vertices[idx] = self.vertices
data = self.values[:, np.newaxis]
assert len(data) == sum(len(v) for v in vertices)
stc = SourceEstimate(data, vertices, tmin=1, tstep=1,
subject=subject_from)
stc = compute_source_morph(
stc, subject_from, subject_to, spacing=spacing, smooth=smooth,
subjects_dir=subjects_dir, warn=False).apply(stc)
inds = np.nonzero(stc.data)[0]
self.values = stc.data[inds, :].ravel()
self.pos = np.zeros((len(inds), 3))
self.vertices = stc.vertices[idx][inds]
self.subject = subject_to
return self
@fill_doc
def split(self, parts=2, subject=None, subjects_dir=None,
freesurfer=False):
"""Split the Label into two or more parts.
Parameters
----------
parts : int >= 2 | tuple of str | str
Number of labels to create (default is 2), or tuple of strings
specifying label names for new labels (from posterior to anterior),
or 'contiguous' to split the label into connected components.
If a number or 'contiguous' is specified, names of the new labels
will be the input label's name with div1, div2 etc. appended.
subject : None | str
Subject which this label belongs to (needed to locate surface file;
should only be specified if it is not specified in the label).
%(subjects_dir)s
freesurfer : bool
By default (``False``) ``split_label`` uses an algorithm that is
slightly optimized for performance and numerical precision. Set
``freesurfer`` to ``True`` in order to replicate label splits from
FreeSurfer's ``mris_divide_parcellation``.
Returns
-------
labels : list of Label, shape (n_parts,)
The labels, starting from the lowest to the highest end of the
projection axis.
Notes
-----
If using 'contiguous' split, you must ensure that the label being split
uses the same triangular resolution as the surface mesh files in
        ``subjects_dir``. Also, some small fringe labels may be returned that
are close (but not connected) to the large components.
The spatial split finds the label's principal eigen-axis on the
spherical surface, projects all label vertex coordinates onto this
axis, and divides them at regular spatial intervals.
"""
if isinstance(parts, str) and parts == 'contiguous':
return _split_label_contig(self, subject, subjects_dir)
elif isinstance(parts, (tuple, int)):
return split_label(self, parts, subject, subjects_dir, freesurfer)
else:
raise ValueError("Need integer, tuple of strings, or string "
"('contiguous'). Got %s)" % type(parts))
def get_vertices_used(self, vertices=None):
"""Get the source space's vertices inside the label.
Parameters
----------
vertices : ndarray of int, shape (n_vertices,) | None
The set of vertices to compare the label to. If None, equals to
``np.arange(10242)``. Defaults to None.
Returns
-------
        label_verts : ndarray of int, shape (n_label_vertices,)
            The vertices of the label that are used by the data.
"""
if vertices is None:
vertices = np.arange(10242)
label_verts = vertices[np.in1d(vertices, self.vertices)]
return label_verts
def get_tris(self, tris, vertices=None):
"""Get the source space's triangles inside the label.
Parameters
----------
tris : ndarray of int, shape (n_tris, 3)
The set of triangles corresponding to the vertices in a
source space.
vertices : ndarray of int, shape (n_vertices,) | None
The set of vertices to compare the label to. If None, equals to
``np.arange(10242)``. Defaults to None.
Returns
-------
label_tris : ndarray of int, shape (n_tris, 3)
The subset of tris used by the label.
"""
vertices_ = self.get_vertices_used(vertices)
selection = np.all(np.in1d(tris, vertices_).reshape(tris.shape),
axis=1)
label_tris = tris[selection]
if len(np.unique(label_tris)) < len(vertices_):
logger.info('Surprising label structure. Trying to repair '
'triangles.')
dropped_vertices = np.setdiff1d(vertices_, label_tris)
n_dropped = len(dropped_vertices)
assert n_dropped == (len(vertices_) - len(np.unique(label_tris)))
# put missing vertices as extra zero-length triangles
add_tris = (dropped_vertices +
np.zeros((len(dropped_vertices), 3), dtype=int).T)
label_tris = np.r_[label_tris, add_tris.T]
assert len(np.unique(label_tris)) == len(vertices_)
return label_tris
@fill_doc
def center_of_mass(self, subject=None, restrict_vertices=False,
subjects_dir=None, surf='sphere'):
"""Compute the center of mass of the label.
This function computes the spatial center of mass on the surface
as in :footcite:`LarsonLee2013`.
Parameters
----------
subject : str | None
The subject the label is defined for.
restrict_vertices : bool | array of int | instance of SourceSpaces
If True, returned vertex will be one from the label. Otherwise,
it could be any vertex from surf. If an array of int, the
returned vertex will come from that array. If instance of
SourceSpaces (as of 0.13), the returned vertex will be from
            the given source space. For most accurate estimates, do not
restrict vertices.
%(subjects_dir)s
surf : str
The surface to use for Euclidean distance center of mass
finding. The default here is "sphere", which finds the center
of mass on the spherical surface to help avoid potential issues
with cortical folding.
Returns
-------
vertex : int
Vertex of the spatial center of mass for the inferred hemisphere,
with each vertex weighted by its label value.
See Also
--------
SourceEstimate.center_of_mass
vertex_to_mni
Notes
-----
.. versionadded:: 0.13
References
----------
.. footbibliography::
"""
if not isinstance(surf, str):
raise TypeError('surf must be a string, got %s' % (type(surf),))
subject = _check_subject(self.subject, subject)
if np.any(self.values < 0):
raise ValueError('Cannot compute COM with negative values')
if np.all(self.values == 0):
raise ValueError('Cannot compute COM with all values == 0. For '
'structural labels, consider setting to ones via '
'label.values[:] = 1.')
vertex = _center_of_mass(self.vertices, self.values, self.hemi, surf,
subject, subjects_dir, restrict_vertices)
return vertex
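# Illustrative sketch (vertex numbers and subject are made up for this
# comment): labels on the same hemisphere can be combined with "+", which
# merges the vertex sets and sums the values of duplicate vertices:
#   lbl1 = Label([0, 1, 2], hemi='lh', subject='sample')
#   lbl2 = Label([2, 3], hemi='lh', subject='sample')
#   merged = lbl1 + lbl2  # vertices [0, 1, 2, 3]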
def _get_label_src(label, src):
_validate_type(src, SourceSpaces, 'src')
if src.kind != 'surface':
raise RuntimeError('Cannot operate on SourceSpaces that are not '
'surface type, got %s' % (src.kind,))
if label.hemi == 'lh':
hemi_src = src[0]
else:
hemi_src = src[1]
return hemi_src
class BiHemiLabel(object):
"""A freesurfer/MNE label with vertices in both hemispheres.
Parameters
----------
lh : Label
Label for the left hemisphere.
rh : Label
Label for the right hemisphere.
name : None | str
Name for the label.
color : None | color
Label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
Note that due to file specification limitations, the color isn't saved
to or loaded from files written to disk.
Attributes
----------
lh : Label
Label for the left hemisphere.
rh : Label
Label for the right hemisphere.
name : None | str
A name for the label. It is OK to change that attribute manually.
subject : str | None
Subject the label is from.
"""
def __init__(self, lh, rh, name=None, color=None): # noqa: D102
if lh.subject != rh.subject:
raise ValueError('lh.subject (%s) and rh.subject (%s) must '
'agree' % (lh.subject, rh.subject))
self.lh = lh
self.rh = rh
self.name = name
self.subject = lh.subject
self.color = color
self.hemi = 'both'
def __repr__(self): # noqa: D105
temp = "<BiHemiLabel | %s, lh : %i vertices, rh : %i vertices>"
name = 'unknown, ' if self.subject is None else self.subject + ', '
name += repr(self.name) if self.name is not None else "unnamed"
return temp % (name, len(self.lh), len(self.rh))
def __len__(self):
"""Return the number of vertices.
Returns
-------
n_vertices : int
The number of vertices.
"""
return len(self.lh) + len(self.rh)
def __add__(self, other):
"""Add labels."""
if isinstance(other, Label):
if other.hemi == 'lh':
lh = self.lh + other
rh = self.rh
else:
lh = self.lh
rh = self.rh + other
elif isinstance(other, BiHemiLabel):
lh = self.lh + other.lh
rh = self.rh + other.rh
else:
raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
name = '%s + %s' % (self.name, other.name)
color = _blend_colors(self.color, other.color)
return BiHemiLabel(lh, rh, name, color)
def __sub__(self, other):
"""Subtract labels."""
_validate_type(other, (Label, BiHemiLabel), 'other')
if isinstance(other, Label):
if other.hemi == 'lh':
lh = self.lh - other
rh = self.rh
else:
rh = self.rh - other
lh = self.lh
else: # isinstance(other, BiHemiLabel)
lh = self.lh - other.lh
rh = self.rh - other.rh
if len(lh.vertices) == 0:
return rh
elif len(rh.vertices) == 0:
return lh
else:
name = '%s - %s' % (self.name, other.name)
return BiHemiLabel(lh, rh, name, self.color)
def read_label(filename, subject=None, color=None):
"""Read FreeSurfer Label file.
Parameters
----------
filename : str
Path to label file.
subject : str | None
Name of the subject the data are defined for.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
color : None | matplotlib color
Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
Note that due to file specification limitations, the color isn't saved
to or loaded from files written to disk.
Returns
-------
label : Label
Instance of Label object with attributes:
- ``comment``: comment from the first line of the label file
- ``vertices``: vertex indices (0 based, column 1)
- ``pos``: locations in meters (columns 2 - 4 divided by 1000)
- ``values``: values at the vertices (column 5)
See Also
--------
read_labels_from_annot
write_labels_to_annot
"""
if subject is not None and not isinstance(subject, str):
raise TypeError('subject must be a string')
# find hemi
basename = op.basename(filename)
if basename.endswith('lh.label') or basename.startswith('lh.'):
hemi = 'lh'
elif basename.endswith('rh.label') or basename.startswith('rh.'):
hemi = 'rh'
else:
raise ValueError('Cannot find which hemisphere it is. File should end'
' with lh.label or rh.label: %s' % (basename,))
# find name
if basename.startswith(('lh.', 'rh.')):
basename_ = basename[3:]
if basename.endswith('.label'):
basename_ = basename[:-6]
else:
basename_ = basename[:-9]
name = "%s-%s" % (basename_, hemi)
# read the file
with open(filename, 'r') as fid:
comment = fid.readline().replace('\n', '')[1:]
nv = int(fid.readline())
data = np.empty((5, nv))
for i, line in enumerate(fid):
data[:, i] = line.split()
# let's make sure everything is ordered correctly
vertices = np.array(data[0], dtype=np.int32)
pos = 1e-3 * data[1:4].T
values = data[4]
order = np.argsort(vertices)
vertices = vertices[order]
pos = pos[order]
values = values[order]
label = Label(vertices, pos, values, hemi, comment, name, filename,
subject, color)
return label
@verbose
def write_label(filename, label, verbose=None):
"""Write a FreeSurfer label.
Parameters
----------
filename : str
Path to label file to produce.
label : Label
The label object to save.
%(verbose)s
See Also
--------
write_labels_to_annot
Notes
-----
Note that due to file specification limitations, the Label's subject and
color attributes are not saved to disk.
"""
hemi = label.hemi
path_head, name = op.split(filename)
if name.endswith('.label'):
name = name[:-6]
if not (name.startswith(hemi) or name.endswith(hemi)):
name += '-' + hemi
filename = op.join(path_head, name) + '.label'
logger.info('Saving label to : %s' % filename)
with open(filename, 'wb') as fid:
n_vertices = len(label.vertices)
data = np.zeros((n_vertices, 5), dtype=np.float64)
data[:, 0] = label.vertices
data[:, 1:4] = 1e3 * label.pos
data[:, 4] = label.values
fid.write(b'#%s\n' % label.comment.encode())
fid.write(b'%d\n' % n_vertices)
for d in data:
fid.write(b'%d %f %f %f %f\n' % tuple(d))
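# Illustrative round-trip sketch (the file path is made up for this comment);
# note that write_label() appends the hemisphere to the name if it is missing:
#   write_label('/tmp/example-lh.label', label)
#   label2 = read_label('/tmp/example-lh.label', subject='sample')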
def _prep_label_split(label, subject=None, subjects_dir=None):
"""Get label and subject information prior to label splitting."""
# If necessary, find the label
if isinstance(label, BiHemiLabel):
raise TypeError("Can only split labels restricted to one hemisphere.")
elif isinstance(label, str):
label = read_label(label)
# Find the subject
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if label.subject is None and subject is None:
raise ValueError("The subject needs to be specified.")
elif subject is None:
subject = label.subject
elif label.subject is None:
pass
elif subject != label.subject:
raise ValueError("The label specifies a different subject (%r) from "
"the subject parameter (%r)."
                         % (label.subject, subject))
return label, subject, subjects_dir
def _split_label_contig(label_to_split, subject=None, subjects_dir=None):
"""Split label into contiguous regions (i.e., connected components).
Parameters
----------
label_to_split : Label | str
Label which is to be split (Label object or path to a label file).
subject : None | str
Subject which this label belongs to (needed to locate surface file;
should only be specified if it is not specified in the label).
%(subjects_dir)s
Returns
-------
labels : list of Label
The contiguous labels, in order of descending size.
"""
# Convert to correct input if necessary
label_to_split, subject, subjects_dir = _prep_label_split(label_to_split,
subject,
subjects_dir)
# Find the spherical surface to get vertices and tris
surf_fname = '.'.join((label_to_split.hemi, 'sphere'))
surf_path = op.join(subjects_dir, subject, 'surf', surf_fname)
surface_points, surface_tris = read_surface(surf_path)
# Get vertices we want to keep and compute mesh edges
verts_arr = label_to_split.vertices
edges_all = mesh_edges(surface_tris)
# Subselect rows and cols of vertices that belong to the label
select_edges = edges_all[verts_arr][:, verts_arr].tocoo()
# Compute connected components and store as lists of vertex numbers
comp_labels = _get_components(verts_arr, select_edges)
# Convert to indices in the original surface space
label_divs = []
for comp in comp_labels:
label_divs.append(verts_arr[comp])
# Construct label division names
n_parts = len(label_divs)
if label_to_split.name.endswith(('lh', 'rh')):
basename = label_to_split.name[:-3]
name_ext = label_to_split.name[-3:]
else:
basename = label_to_split.name
name_ext = ''
name_pattern = "%s_div%%i%s" % (basename, name_ext)
names = tuple(name_pattern % i for i in range(1, n_parts + 1))
# Colors
if label_to_split.color is None:
colors = (None,) * n_parts
else:
colors = _split_colors(label_to_split.color, n_parts)
# Sort label divisions by their size (in vertices)
label_divs.sort(key=lambda x: len(x), reverse=True)
labels = []
for div, name, color in zip(label_divs, names, colors):
# Get indices of dipoles within this division of the label
verts = np.array(sorted(list(div)), int)
vert_indices = np.in1d(verts_arr, verts, assume_unique=True)
# Set label attributes
pos = label_to_split.pos[vert_indices]
values = label_to_split.values[vert_indices]
hemi = label_to_split.hemi
comment = label_to_split.comment
lbl = Label(verts, pos, values, hemi, comment, name, None, subject,
color)
labels.append(lbl)
return labels
@fill_doc
def split_label(label, parts=2, subject=None, subjects_dir=None,
freesurfer=False):
"""Split a Label into two or more parts.
Parameters
----------
label : Label | str
Label which is to be split (Label object or path to a label file).
parts : int >= 2 | tuple of str
A sequence of strings specifying label names for the new labels (from
posterior to anterior), or the number of new labels to create (default
is 2). If a number is specified, names of the new labels will be the
input label's name with div1, div2 etc. appended.
subject : None | str
Subject which this label belongs to (needed to locate surface file;
should only be specified if it is not specified in the label).
%(subjects_dir)s
freesurfer : bool
By default (``False``) ``split_label`` uses an algorithm that is
slightly optimized for performance and numerical precision. Set
``freesurfer`` to ``True`` in order to replicate label splits from
FreeSurfer's ``mris_divide_parcellation``.
Returns
-------
labels : list of Label, shape (n_parts,)
The labels, starting from the lowest to the highest end of the
projection axis.
Notes
-----
Works by finding the label's principal eigen-axis on the spherical surface,
projecting all label vertex coordinates onto this axis and dividing them at
regular spatial intervals.
"""
from scipy import linalg
label, subject, subjects_dir = _prep_label_split(label, subject,
subjects_dir)
# find the parts
if np.isscalar(parts):
n_parts = int(parts)
if label.name.endswith(('lh', 'rh')):
basename = label.name[:-3]
name_ext = label.name[-3:]
else:
basename = label.name
name_ext = ''
name_pattern = "%s_div%%i%s" % (basename, name_ext)
names = tuple(name_pattern % i for i in range(1, n_parts + 1))
else:
names = parts
n_parts = len(names)
if n_parts < 2:
raise ValueError("Can't split label into %i parts" % n_parts)
# find the spherical surface
surf_fname = '.'.join((label.hemi, 'sphere'))
surf_path = op.join(subjects_dir, subject, "surf", surf_fname)
surface_points, surface_tris = read_surface(surf_path)
# find the label coordinates on the surface
points = surface_points[label.vertices]
center = np.mean(points, axis=0)
centered_points = points - center
# find the label's normal
if freesurfer:
# find the Freesurfer vertex closest to the center
distance = np.sqrt(np.sum(centered_points ** 2, axis=1))
i_closest = np.argmin(distance)
closest_vertex = label.vertices[i_closest]
# find the normal according to freesurfer convention
idx = np.any(surface_tris == closest_vertex, axis=1)
tris_for_normal = surface_tris[idx]
r1 = surface_points[tris_for_normal[:, 0], :]
r2 = surface_points[tris_for_normal[:, 1], :]
r3 = surface_points[tris_for_normal[:, 2], :]
tri_normals = fast_cross_3d((r2 - r1), (r3 - r1))
normal = np.mean(tri_normals, axis=0)
normal /= linalg.norm(normal)
else:
# Normal of the center
normal = center / linalg.norm(center)
# project all vertex coordinates on the tangential plane for this point
q, _ = linalg.qr(normal[:, np.newaxis])
tangent_u = q[:, 1:]
m_obs = np.dot(centered_points, tangent_u)
# find principal eigendirection
m_cov = np.dot(m_obs.T, m_obs)
w, vr = linalg.eig(m_cov)
i = np.argmax(w)
eigendir = vr[:, i]
# project back into 3d space
axis = np.dot(tangent_u, eigendir)
# orient them from posterior to anterior
if axis[1] < 0:
axis *= -1
# project the label on the axis
proj = np.dot(points, axis)
# assign mark (new label index)
proj -= proj.min()
proj /= (proj.max() / n_parts)
mark = proj // 1
mark[mark == n_parts] = n_parts - 1
# colors
if label.color is None:
colors = (None,) * n_parts
else:
colors = _split_colors(label.color, n_parts)
# construct new labels
labels = []
for i, name, color in zip(range(n_parts), names, colors):
idx = (mark == i)
vert = label.vertices[idx]
pos = label.pos[idx]
values = label.values[idx]
hemi = label.hemi
comment = label.comment
lbl = Label(vert, pos, values, hemi, comment, name, None, subject,
color)
labels.append(lbl)
return labels
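# Illustrative sketch (subject name is made up for this comment): split a
# label into three parts along its posterior-anterior axis, or into its
# connected components via the method form:
#   parts = split_label(label, parts=3, subject='sample')
#   contig = label.split('contiguous', subject='sample')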
def label_sign_flip(label, src):
"""Compute sign for label averaging.
Parameters
----------
label : Label | BiHemiLabel
A label.
src : SourceSpaces
The source space over which the label is defined.
Returns
-------
flip : array
Sign flip vector (contains 1 or -1).
"""
from scipy import linalg
if len(src) != 2:
        raise ValueError('Only source spaces with 2 hemispheres are accepted')
lh_vertno = src[0]['vertno']
rh_vertno = src[1]['vertno']
# get source orientations
ori = list()
if label.hemi in ('lh', 'both'):
vertices = label.vertices if label.hemi == 'lh' else label.lh.vertices
vertno_sel = np.intersect1d(lh_vertno, vertices)
ori.append(src[0]['nn'][vertno_sel])
if label.hemi in ('rh', 'both'):
vertices = label.vertices if label.hemi == 'rh' else label.rh.vertices
vertno_sel = np.intersect1d(rh_vertno, vertices)
ori.append(src[1]['nn'][vertno_sel])
if len(ori) == 0:
raise Exception('Unknown hemisphere type "%s"' % (label.hemi,))
ori = np.concatenate(ori, axis=0)
if len(ori) == 0:
return np.array([], int)
_, _, Vh = linalg.svd(ori, full_matrices=False)
# The sign of Vh is ambiguous, so we should align to the max-positive
# (outward) direction
dots = np.dot(ori, Vh[0])
if np.mean(dots) < 0:
dots *= -1
# Comparing to the direction of the first right singular vector
flip = np.sign(dots)
return flip
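# Illustrative sketch (assumes "src" is a surface source space with two
# hemispheres and "tc_in_label" holds one time course per label vertex): the
# +/-1 vector aligns source orientations before averaging within the label:
#   flip = label_sign_flip(label, src)
#   mean_tc = np.mean(flip[:, np.newaxis] * tc_in_label, axis=0)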
@verbose
def stc_to_label(stc, src=None, smooth=True, connected=False,
subjects_dir=None, verbose=None):
"""Compute a label from the non-zero sources in an stc object.
Parameters
----------
stc : SourceEstimate
The source estimates.
src : SourceSpaces | str | None
The source space over which the source estimates are defined.
        If it's a string it should be the subject name (e.g. fsaverage).
Can be None if stc.subject is not None.
smooth : bool
Fill in vertices on the cortical surface that are not in the source
space based on the closest source space vertex (requires
src to be a SourceSpace).
connected : bool
If True a list of connected labels will be returned in each
        hemisphere. The labels are ordered in decreasing order based on
        the maximum value in the stc.
%(subjects_dir)s
%(verbose)s
Returns
-------
labels : list of Label | list of list of Label
The generated labels. If connected is False, it returns
a list of Labels (one per hemisphere). If no Label is available
in a hemisphere, None is returned. If connected is True,
it returns for each hemisphere a list of connected labels
        ordered in decreasing order based on the maximum value in the stc.
        If no Label is available in a hemisphere, an empty list is returned.
"""
if not isinstance(smooth, bool):
raise ValueError('smooth should be True or False. Got %s.' % smooth)
src = stc.subject if src is None else src
if src is None:
raise ValueError('src cannot be None if stc.subject is None')
if isinstance(src, str):
subject = src
else:
subject = stc.subject
if not isinstance(stc, SourceEstimate):
        raise ValueError('stc must be a surface SourceEstimate')
if isinstance(src, str):
if connected:
raise ValueError('The option to return only connected labels is '
'only available if source spaces are provided.')
if smooth:
msg = ("stc_to_label with smooth=True requires src to be an "
"instance of SourceSpace")
raise ValueError(msg)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surf_path_from = op.join(subjects_dir, src, 'surf')
rr_lh, tris_lh = read_surface(op.join(surf_path_from, 'lh.white'))
rr_rh, tris_rh = read_surface(op.join(surf_path_from, 'rh.white'))
rr = [rr_lh, rr_rh]
tris = [tris_lh, tris_rh]
else:
if not isinstance(src, SourceSpaces):
raise TypeError('src must be a string or a set of source spaces')
if len(src) != 2:
raise ValueError('source space should contain the 2 hemispheres')
rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
tris = [src[0]['tris'], src[1]['tris']]
src_conn = spatial_src_adjacency(src).tocsr()
labels = []
cnt = 0
cnt_full = 0
for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
zip(['lh', 'rh'], stc.vertices, tris, rr)):
this_data = stc.data[cnt:cnt + len(this_vertno)]
if connected: # we know src *must* be a SourceSpaces now
vertno = np.where(src[hemi_idx]['inuse'])[0]
if not len(np.setdiff1d(this_vertno, vertno)) == 0:
raise RuntimeError('stc contains vertices not present '
'in source space, did you morph?')
tmp = np.zeros((len(vertno), this_data.shape[1]))
this_vertno_idx = np.searchsorted(vertno, this_vertno)
tmp[this_vertno_idx] = this_data
this_data = tmp
offset = cnt_full + len(this_data)
this_src_adj = src_conn[cnt_full:offset, cnt_full:offset].tocoo()
this_data_abs_max = np.abs(this_data).max(axis=1)
clusters, _ = _find_clusters(this_data_abs_max, 0.,
adjacency=this_src_adj)
cnt_full += len(this_data)
# Then order clusters in descending order based on maximum value
clusters_max = np.argsort([np.max(this_data_abs_max[c])
for c in clusters])[::-1]
clusters = [clusters[k] for k in clusters_max]
clusters = [vertno[c] for c in clusters]
else:
clusters = [this_vertno[np.any(this_data, axis=1)]]
cnt += len(this_vertno)
clusters = [c for c in clusters if len(c) > 0]
if len(clusters) == 0:
if not connected:
this_labels = None
else:
this_labels = []
else:
this_labels = []
colors = _n_colors(len(clusters))
for c, color in zip(clusters, colors):
idx_use = c
label = Label(idx_use, this_rr[idx_use], None, hemi,
'Label from stc', subject=subject,
color=color)
if smooth:
label = label.fill(src)
this_labels.append(label)
if not connected:
this_labels = this_labels[0]
labels.append(this_labels)
return labels
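# Illustrative sketch (assumes "stc" was computed on the source space "src"):
#   labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
#                                       connected=False)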
def _verts_within_dist(graph, sources, max_dist):
"""Find all vertices wihin a maximum geodesic distance from source.
Parameters
----------
graph : scipy.sparse.csr_matrix
Sparse matrix with distances between adjacent vertices.
sources : list of int
Source vertices.
max_dist : float
Maximum geodesic distance.
Returns
-------
verts : array
Vertices within max_dist.
dist : array
Distances from source vertex.
"""
dist_map = {}
verts_added_last = []
for source in sources:
dist_map[source] = 0
verts_added_last.append(source)
# add neighbors until no more neighbors within max_dist can be found
while len(verts_added_last) > 0:
verts_added = []
for i in verts_added_last:
v_dist = dist_map[i]
row = graph[i, :]
neighbor_vert = row.indices
neighbor_dist = row.data
for j, d in zip(neighbor_vert, neighbor_dist):
n_dist = v_dist + d
if j in dist_map:
if n_dist < dist_map[j]:
dist_map[j] = n_dist
else:
if n_dist <= max_dist:
dist_map[j] = n_dist
# we found a new vertex within max_dist
verts_added.append(j)
verts_added_last = verts_added
verts = np.sort(np.array(list(dist_map.keys()), int))
dist = np.array([dist_map[v] for v in verts], int)
return verts, dist
def _grow_labels(seeds, extents, hemis, names, dist, vert, subject):
"""Parallelize grow_labels."""
labels = []
for seed, extent, hemi, name in zip(seeds, extents, hemis, names):
label_verts, label_dist = _verts_within_dist(dist[hemi], seed, extent)
# create a label
if len(seed) == 1:
seed_repr = str(seed)
else:
seed_repr = ','.join(map(str, seed))
comment = 'Circular label: seed=%s, extent=%0.1fmm' % (seed_repr,
extent)
label = Label(vertices=label_verts,
pos=vert[hemi][label_verts],
values=label_dist,
hemi=hemi,
comment=comment,
name=str(name),
subject=subject)
labels.append(label)
return labels
@fill_doc
def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1,
overlap=True, names=None, surface='white', colors=None):
"""Generate circular labels in source space with region growing.
This function generates a number of labels in source space by growing
regions starting from the vertices defined in "seeds". For each seed, a
label is generated containing all vertices within a maximum geodesic
distance on the white matter surface from the seed.
Parameters
----------
subject : str
Name of the subject as in SUBJECTS_DIR.
seeds : int | list
Seed, or list of seeds. Each seed can be either a vertex number or
a list of vertex numbers.
extents : array | float
Extents (radius in mm) of the labels.
hemis : array | int
Hemispheres to use for the labels (0: left, 1: right).
%(subjects_dir)s
%(n_jobs)s
Likely only useful if tens or hundreds of labels are being expanded
simultaneously. Does not apply with ``overlap=False``.
overlap : bool
Produce overlapping labels. If True (default), the resulting labels
can be overlapping. If False, each label will be grown one step at a
time, and occupied territory will not be invaded.
names : None | list of str
Assign names to the new labels (list needs to have the same length as
seeds).
surface : str
The surface used to grow the labels, defaults to the white surface.
colors : array, shape (n, 4) or (, 4) | None
How to assign colors to each label. If None then unique colors will be
chosen automatically (default), otherwise colors will be broadcast
from the array. The first three values will be interpreted as RGB
colors and the fourth column as the alpha value (commonly 1).
Returns
-------
labels : list of Label
The labels' ``comment`` attribute contains information on the seed
vertex and extent; the ``values`` attribute contains distance from the
seed in millimeters.
Notes
-----
"extents" and "hemis" can either be arrays with the same length as
    seeds, which allows using a different extent and hemisphere for each
    label, or integers, in which case the same extent and hemisphere is
used for each label.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
n_jobs = check_n_jobs(n_jobs)
# make sure the inputs are arrays
if np.isscalar(seeds):
seeds = [seeds]
seeds = [np.atleast_1d(seed) for seed in seeds]
extents = np.atleast_1d(extents)
hemis = np.atleast_1d(hemis)
n_seeds = len(seeds)
if len(extents) != 1 and len(extents) != n_seeds:
raise ValueError('The extents parameter has to be of length 1 or '
'len(seeds)')
if len(hemis) != 1 and len(hemis) != n_seeds:
raise ValueError('The hemis parameter has to be of length 1 or '
'len(seeds)')
if colors is not None:
if len(colors.shape) == 1: # if one color for all seeds
n_colors = 1
n = colors.shape[0]
else:
n_colors, n = colors.shape
if n_colors != n_seeds and n_colors != 1:
msg = ('Number of colors (%d) and seeds (%d) are not compatible.' %
(n_colors, n_seeds))
raise ValueError(msg)
if n != 4:
msg = 'Colors must have 4 values (RGB and alpha), not %d.' % n
raise ValueError(msg)
# make the arrays the same length as seeds
if len(extents) == 1:
extents = np.tile(extents, n_seeds)
if len(hemis) == 1:
hemis = np.tile(hemis, n_seeds)
hemis = np.array(['lh' if h == 0 else 'rh' for h in hemis])
# names
if names is None:
names = ["Label_%i-%s" % items for items in enumerate(hemis)]
else:
if np.isscalar(names):
names = [names]
if len(names) != n_seeds:
raise ValueError('The names parameter has to be None or have '
'length len(seeds)')
for i, hemi in enumerate(hemis):
if not names[i].endswith(hemi):
names[i] = '-'.join((names[i], hemi))
names = np.array(names)
# load the surfaces and create the distance graphs
tris, vert, dist = {}, {}, {}
for hemi in set(hemis):
surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
surface)
vert[hemi], tris[hemi] = read_surface(surf_fname)
dist[hemi] = mesh_dist(tris[hemi], vert[hemi])
if overlap:
# create the patches
parallel, my_grow_labels, _ = parallel_func(_grow_labels, n_jobs)
seeds = np.array_split(np.array(seeds, dtype='O'), n_jobs)
extents = np.array_split(extents, n_jobs)
hemis = np.array_split(hemis, n_jobs)
names = np.array_split(names, n_jobs)
labels = sum(parallel(my_grow_labels(s, e, h, n, dist, vert, subject)
for s, e, h, n
in zip(seeds, extents, hemis, names)), [])
else:
# special procedure for non-overlapping labels
labels = _grow_nonoverlapping_labels(subject, seeds, extents, hemis,
vert, dist, names)
if colors is None:
# add a unique color to each label
label_colors = _n_colors(len(labels))
else:
# use specified colors
label_colors = np.empty((len(labels), 4))
label_colors[:] = colors
for label, color in zip(labels, label_colors):
label.color = color
return labels
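# Illustrative sketch (seed vertices and subject are made up for this
# comment): grow two 10 mm labels around single seed vertices, one on each
# hemisphere:
#   labels = grow_labels('sample', seeds=[1000, 2000], extents=10.,
#                        hemis=[0, 1])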
def _grow_nonoverlapping_labels(subject, seeds_, extents_, hemis, vertices_,
graphs, names_):
"""Grow labels while ensuring that they don't overlap."""
labels = []
for hemi in set(hemis):
hemi_index = (hemis == hemi)
seeds = [seed for seed, h in zip(seeds_, hemis) if h == hemi]
extents = extents_[hemi_index]
names = names_[hemi_index]
graph = graphs[hemi] # distance graph
n_vertices = len(vertices_[hemi])
n_labels = len(seeds)
# prepare parcellation
parc = np.empty(n_vertices, dtype='int32')
parc[:] = -1
# initialize active sources
sources = {} # vert -> (label, dist_from_seed)
edge = [] # queue of vertices to process
for label, seed in enumerate(seeds):
if np.any(parc[seed] >= 0):
raise ValueError("Overlapping seeds")
parc[seed] = label
for s in np.atleast_1d(seed):
sources[s] = (label, 0.)
edge.append(s)
# grow from sources
while edge:
vert_from = edge.pop(0)
label, old_dist = sources[vert_from]
# add neighbors within allowable distance
row = graph[vert_from, :]
for vert_to, dist in zip(row.indices, row.data):
# Prevent adding a point that has already been used
# (prevents infinite loop)
if (vert_to == seeds[label]).any():
continue
new_dist = old_dist + dist
# abort if outside of extent
if new_dist > extents[label]:
continue
vert_to_label = parc[vert_to]
if vert_to_label >= 0:
_, vert_to_dist = sources[vert_to]
# abort if the vertex is occupied by a closer seed
if new_dist > vert_to_dist:
continue
elif vert_to in edge:
edge.remove(vert_to)
# assign label value
parc[vert_to] = label
sources[vert_to] = (label, new_dist)
edge.append(vert_to)
# convert parc to labels
for i in range(n_labels):
vertices = np.nonzero(parc == i)[0]
name = str(names[i])
label_ = Label(vertices, hemi=hemi, name=name, subject=subject)
labels.append(label_)
return labels
@fill_doc
def random_parcellation(subject, n_parcel, hemi, subjects_dir=None,
surface='white', random_state=None):
"""Generate random cortex parcellation by growing labels.
This function generates a number of labels which don't intersect and
cover the whole surface. Regions are growing around randomly chosen
seeds.
Parameters
----------
subject : str
Name of the subject as in SUBJECTS_DIR.
n_parcel : int
Total number of cortical parcels.
hemi : str
        Hemisphere id (i.e., 'lh', 'rh', 'both'). In the case
of 'both', both hemispheres are processed with (n_parcel // 2)
parcels per hemisphere.
%(subjects_dir)s
surface : str
The surface used to grow the labels, defaults to the white surface.
%(random_state)s
Returns
-------
labels : list of Label
Random cortex parcellation.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if hemi == 'both':
hemi = ['lh', 'rh']
hemis = np.atleast_1d(hemi)
# load the surfaces and create the distance graphs
tris, vert, dist = {}, {}, {}
for hemi in set(hemis):
surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
surface)
vert[hemi], tris[hemi] = read_surface(surf_fname)
dist[hemi] = mesh_dist(tris[hemi], vert[hemi])
# create the patches
labels = _cortex_parcellation(subject, n_parcel, hemis, vert, dist,
random_state)
# add a unique color to each label
colors = _n_colors(len(labels))
for label, color in zip(labels, colors):
label.color = color
return labels
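# Illustrative sketch (subject name is made up for this comment): split both
# hemispheres of 'sample' into roughly 20 random parcels:
#   parcels = random_parcellation('sample', 20, 'both', random_state=0)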
def _cortex_parcellation(subject, n_parcel, hemis, vertices_, graphs,
random_state=None):
"""Random cortex parcellation."""
labels = []
rng = check_random_state(random_state)
for hemi in set(hemis):
parcel_size = len(hemis) * len(vertices_[hemi]) // n_parcel
graph = graphs[hemi] # distance graph
n_vertices = len(vertices_[hemi])
# prepare parcellation
parc = np.full(n_vertices, -1, dtype='int32')
# initialize active sources
s = rng.choice(range(n_vertices))
label_idx = 0
edge = [s] # queue of vertices to process
parc[s] = label_idx
label_size = 1
rest = len(parc) - 1
# grow from sources
while rest:
            # if there are no free neighbors, start a new parcel
if not edge:
rest_idx = np.where(parc < 0)[0]
s = rng.choice(rest_idx)
edge = [s]
label_idx += 1
label_size = 1
parc[s] = label_idx
rest -= 1
vert_from = edge.pop(0)
# add neighbors within allowable distance
# row = graph[vert_from, :]
# row_indices, row_data = row.indices, row.data
sl = slice(graph.indptr[vert_from], graph.indptr[vert_from + 1])
row_indices, row_data = graph.indices[sl], graph.data[sl]
for vert_to, dist in zip(row_indices, row_data):
vert_to_label = parc[vert_to]
# abort if the vertex is already occupied
if vert_to_label >= 0:
continue
                # start a new label once the current one reaches its target size
if label_size > parcel_size:
label_idx += 1
label_size = 1
edge = [vert_to]
parc[vert_to] = label_idx
rest -= 1
break
# assign label value
parc[vert_to] = label_idx
label_size += 1
edge.append(vert_to)
rest -= 1
# merging small labels
# label adjacency matrix
n_labels = label_idx + 1
label_sizes = np.empty(n_labels, dtype=int)
label_conn = np.zeros([n_labels, n_labels], dtype='bool')
for i in range(n_labels):
vertices = np.nonzero(parc == i)[0]
label_sizes[i] = len(vertices)
neighbor_vertices = graph[vertices, :].indices
neighbor_labels = np.unique(np.array(parc[neighbor_vertices]))
label_conn[i, neighbor_labels] = 1
np.fill_diagonal(label_conn, 0)
# merging
label_id = range(n_labels)
while n_labels > n_parcel // len(hemis):
# smallest label and its smallest neighbor
i = np.argmin(label_sizes)
neighbors = np.nonzero(label_conn[i, :])[0]
j = neighbors[np.argmin(label_sizes[neighbors])]
# merging two labels
label_conn[j, :] += label_conn[i, :]
label_conn[:, j] += label_conn[:, i]
label_conn = np.delete(label_conn, i, 0)
label_conn = np.delete(label_conn, i, 1)
label_conn[j, j] = 0
label_sizes[j] += label_sizes[i]
label_sizes = np.delete(label_sizes, i, 0)
n_labels -= 1
vertices = np.nonzero(parc == label_id[i])[0]
parc[vertices] = label_id[j]
label_id = np.delete(label_id, i, 0)
# convert parc to labels
for i in range(n_labels):
vertices = np.nonzero(parc == label_id[i])[0]
name = 'label_' + str(i)
label_ = Label(vertices, hemi=hemi, name=name, subject=subject)
labels.append(label_)
return labels
def _read_annot_cands(dir_name, raise_error=True):
"""List the candidate parcellations."""
if not op.isdir(dir_name):
if not raise_error:
return list()
        raise IOError('Directory for annotation does not exist: %s'
                      % dir_name)
cands = os.listdir(dir_name)
cands = sorted(set(c.replace('lh.', '').replace('rh.', '').replace(
'.annot', '')
for c in cands if '.annot' in c),
key=lambda x: x.lower())
# exclude .ctab files
cands = [c for c in cands if '.ctab' not in c]
return cands
def _read_annot(fname):
"""Read a Freesurfer annotation from a .annot file.
    Note: Copied from PySurfer
Parameters
----------
fname : str
Path to annotation file
Returns
-------
annot : numpy array, shape=(n_verts)
Annotation id at each vertex
ctab : numpy array, shape=(n_entries, 5)
RGBA + label id colortable array
names : list of str
List of region names as stored in the annot file
"""
if not op.isfile(fname):
dir_name = op.split(fname)[0]
cands = _read_annot_cands(dir_name)
if len(cands) == 0:
raise IOError('No such file %s, no candidate parcellations '
'found in directory' % fname)
else:
raise IOError('No such file %s, candidate parcellations in '
'that directory:\n%s' % (fname, '\n'.join(cands)))
with open(fname, "rb") as fid:
n_verts = np.fromfile(fid, '>i4', 1)[0]
data = np.fromfile(fid, '>i4', n_verts * 2).reshape(n_verts, 2)
annot = data[data[:, 0], 1]
ctab_exists = np.fromfile(fid, '>i4', 1)[0]
if not ctab_exists:
raise Exception('Color table not found in annotation file')
n_entries = np.fromfile(fid, '>i4', 1)[0]
if n_entries > 0:
length = np.fromfile(fid, '>i4', 1)[0]
np.fromfile(fid, '>c', length) # discard orig_tab
names = list()
ctab = np.zeros((n_entries, 5), np.int64)
for i in range(n_entries):
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16) +
ctab[i, 3] * (2 ** 24))
else:
ctab_version = -n_entries
if ctab_version != 2:
raise Exception('Color table version not supported')
n_entries = np.fromfile(fid, '>i4', 1)[0]
ctab = np.zeros((n_entries, 5), np.int64)
length = np.fromfile(fid, '>i4', 1)[0]
np.fromfile(fid, "|S%d" % length, 1) # Orig table path
entries_to_read = np.fromfile(fid, '>i4', 1)[0]
names = list()
for i in range(entries_to_read):
np.fromfile(fid, '>i4', 1) # Structure
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16))
# convert to more common alpha value
ctab[:, 3] = 255 - ctab[:, 3]
return annot, ctab, names
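# Note on the colortable encoding used above (illustrative sketch, not part
# of the original module): the per-vertex annotation value is the label's
# RGB color packed into a single integer, so an annotation id can be
# recomputed from a color like this:
#
#     def _pack_annot_id(r, g, b):
#         # mirrors ctab[i, 4] = r + g * 2**8 + b * 2**16 computed above
#         return r + g * (2 ** 8) + b * (2 ** 16)
#
#     _pack_annot_id(220, 20, 20)  # -> 1316060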
def _get_annot_fname(annot_fname, subject, hemi, parc, subjects_dir):
"""Get the .annot filenames and hemispheres."""
if annot_fname is not None:
        # we use the .annot file specified by the user
hemis = [op.basename(annot_fname)[:2]]
if hemis[0] not in ['lh', 'rh']:
raise ValueError('Could not determine hemisphere from filename, '
'filename has to start with "lh" or "rh".')
annot_fname = [annot_fname]
else:
# construct .annot file names for requested subject, parc, hemi
_check_option('hemi', hemi, ['lh', 'rh', 'both'])
if hemi == 'both':
hemis = ['lh', 'rh']
else:
hemis = [hemi]
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
dst = op.join(subjects_dir, subject, 'label', '%%s.%s.annot' % parc)
annot_fname = [dst % hemi_ for hemi_ in hemis]
return annot_fname, hemis
def _load_vert_pos(subject, subjects_dir, surf_name, hemi, n_expected,
extra=''):
fname_surf = op.join(subjects_dir, subject, 'surf',
'%s.%s' % (hemi, surf_name))
vert_pos, _ = read_surface(fname_surf)
vert_pos /= 1e3 # the positions in labels are in meters
if len(vert_pos) != n_expected:
raise RuntimeError('Number of surface vertices (%s) for subject %s'
' does not match the expected number of vertices'
                           ' (%s)%s'
% (len(vert_pos), subject, n_expected, extra))
return vert_pos
@verbose
def read_labels_from_annot(subject, parc='aparc', hemi='both',
surf_name='white', annot_fname=None, regexp=None,
subjects_dir=None, sort=True, verbose=None):
"""Read labels from a FreeSurfer annotation file.
Note: Only cortical labels will be returned.
Parameters
----------
subject : str
The subject for which to read the parcellation.
parc : str
The parcellation to use, e.g., 'aparc' or 'aparc.a2009s'.
hemi : str
The hemisphere from which to read the parcellation, can be 'lh', 'rh',
or 'both'.
surf_name : str
Surface used to obtain vertex locations, e.g., 'white', 'pial'.
annot_fname : str or None
Filename of the .annot file. If not None, only this file is read
and 'parc' and 'hemi' are ignored.
regexp : str
Regular expression or substring to select particular labels from the
parcellation. E.g. 'superior' will return all labels in which this
substring is contained.
%(subjects_dir)s
sort : bool
If true, labels will be sorted by name before being returned.
.. versionadded:: 0.21.0
%(verbose)s
Returns
-------
labels : list of Label
The labels, sorted by label name (ascending).
See Also
--------
write_labels_to_annot
morph_labels
"""
logger.info('Reading labels from parcellation...')
subjects_dir = get_subjects_dir(subjects_dir)
# get the .annot filenames and hemispheres
annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
subjects_dir)
if regexp is not None:
# allow for convenient substring match
r_ = (re.compile('.*%s.*' % regexp if regexp.replace('_', '').isalnum()
else regexp))
# now we are ready to create the labels
n_read = 0
labels = list()
orig_names = set()
for fname, hemi in zip(annot_fname, hemis):
# read annotation
annot, ctab, label_names = _read_annot(fname)
label_rgbas = ctab[:, :4] / 255.
label_ids = ctab[:, -1]
# load the vertex positions from surface
vert_pos = _load_vert_pos(
subject, subjects_dir, surf_name, hemi, len(annot),
extra='for annotation file %s' % fname)
for label_id, label_name, label_rgba in\
zip(label_ids, label_names, label_rgbas):
vertices = np.where(annot == label_id)[0]
if len(vertices) == 0:
# label is not part of cortical surface
continue
label_name = label_name.decode()
orig_names.add(label_name)
name = f'{label_name}-{hemi}'
if (regexp is not None) and not r_.match(name):
continue
pos = vert_pos[vertices, :]
label = Label(vertices, pos, hemi=hemi, name=name,
subject=subject, color=tuple(label_rgba))
labels.append(label)
n_read = len(labels) - n_read
logger.info(' read %d labels from %s' % (n_read, fname))
# sort the labels by label name
if sort:
labels = sorted(labels, key=lambda l: l.name)
if len(labels) == 0:
msg = 'No labels found.'
if regexp is not None:
orig_names = '\n'.join(sorted(orig_names))
msg += (f' Maybe the regular expression {repr(regexp)} did not '
f'match any of:\n{orig_names}')
raise RuntimeError(msg)
return labels
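# Illustrative usage sketch (hypothetical subject; assumes lh/rh 'aparc'
# .annot files exist under SUBJECTS_DIR/<subject>/label):
#
#     labels = read_labels_from_annot('fsaverage', parc='aparc', hemi='lh',
#                                     regexp='superior')
#     # only left-hemisphere labels whose name contains 'superior'
#     print([label.name for label in labels])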
def _check_labels_subject(labels, subject, name):
_validate_type(labels, (list, tuple), 'labels')
for label in labels:
_validate_type(label, Label, 'each entry in labels')
if subject is None:
subject = label.subject
if subject is not None: # label.subject can be None, depending on init
if subject != label.subject:
raise ValueError('Got multiple values of %s: %s and %s'
% (name, subject, label.subject))
if subject is None:
raise ValueError('if label.subject is None for all labels, '
'%s must be provided' % name)
return subject
@verbose
def morph_labels(labels, subject_to, subject_from=None, subjects_dir=None,
surf_name='white', verbose=None):
"""Morph a set of labels.
This is useful when morphing a set of non-overlapping labels (such as those
obtained with :func:`read_labels_from_annot`) from one subject to
another.
Parameters
----------
labels : list
The labels to morph.
subject_to : str
The subject to morph labels to.
subject_from : str | None
The subject to morph labels from. Can be None if the labels
have the ``.subject`` property defined.
%(subjects_dir)s
surf_name : str
Surface used to obtain vertex locations, e.g., 'white', 'pial'.
%(verbose)s
Returns
-------
labels : list
The morphed labels.
See Also
--------
read_labels_from_annot
mne.Label.morph
Notes
-----
    This does not use the same algorithm as FreeSurfer, so the results of
    morphing (e.g., from ``'fsaverage'`` to your subject) might not match
    what FreeSurfer produces during ``recon-all``.
.. versionadded:: 0.18
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
subject_from = _check_labels_subject(labels, subject_from, 'subject_from')
mmaps = read_morph_map(subject_from, subject_to, subjects_dir)
vert_poss = [_load_vert_pos(subject_to, subjects_dir, surf_name, hemi,
mmap.shape[0])
for hemi, mmap in zip(('lh', 'rh'), mmaps)]
idxs = [mmap.argmax(axis=1) for mmap in mmaps]
out_labels = list()
values = filename = None
for label in labels:
li = dict(lh=0, rh=1)[label.hemi]
vertices = np.where(np.in1d(idxs[li], label.vertices))[0]
pos = vert_poss[li][vertices]
out_labels.append(
Label(vertices, pos, values, label.hemi, label.comment, label.name,
filename, subject_to, label.color, label.verbose))
return out_labels
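# Illustrative usage sketch (hypothetical subject names; both subjects need
# surfaces and a morph map available in SUBJECTS_DIR):
#
#     fsavg_labels = read_labels_from_annot('fsaverage', parc='aparc')
#     subj_labels = morph_labels(fsavg_labels, subject_to='sample',
#                                subject_from='fsaverage')
#     # one morphed label per input label, defined on 'sample' vertices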
@verbose
def labels_to_stc(labels, values, tmin=0, tstep=1, subject=None, src=None,
verbose=None):
"""Convert a set of labels and values to a STC.
This function is meant to work like the opposite of
`extract_label_time_course`.
Parameters
----------
%(eltc_labels)s
values : ndarray, shape (n_labels, ...)
The values in each label. Can be 1D or 2D.
tmin : float
The tmin to use for the STC.
tstep : float
The tstep to use for the STC.
subject : str | None
The subject for which to create the STC.
%(eltc_src)s
Can be omitted if using a surface source space, in which case
the label vertices will determine the output STC vertices.
Required if using a volumetric source space.
.. versionadded:: 0.22
%(verbose)s
Returns
-------
stc : instance of SourceEstimate | instance of VolSourceEstimate
The values-in-labels converted to a STC.
See Also
--------
extract_label_time_course
Notes
-----
Vertices that appear in more than one label will be averaged.
.. versionadded:: 0.18
"""
values = np.array(values, float)
if values.ndim == 1:
values = values[:, np.newaxis]
if values.ndim != 2:
raise ValueError('values must have 1 or 2 dimensions, got %s'
% (values.ndim,))
_validate_type(src, (SourceSpaces, None))
if src is None:
data, vertices, subject = _labels_to_stc_surf(
labels, values, tmin, tstep, subject)
klass = SourceEstimate
else:
kind = src.kind
subject = _check_subject(
src._subject, subject, first_kind='source space subject',
raise_error=False)
_check_option('source space kind', kind, ('surface', 'volume'))
if kind == 'volume':
klass = VolSourceEstimate
else:
klass = SourceEstimate
# Easiest way is to get a dot-able operator and use it
vertices = [s['vertno'].copy() for s in src]
stc = klass(
np.eye(sum(len(v) for v in vertices)), vertices, 0, 1, subject)
label_op = extract_label_time_course(
stc, labels, src=src, mode='mean', allow_empty=True)
_check_values_labels(values, label_op.shape[0])
rev_op = np.zeros(label_op.shape[::-1])
rev_op[np.arange(label_op.shape[1]), np.argmax(label_op, axis=0)] = 1.
data = rev_op @ values
return klass(data, vertices, tmin, tstep, subject, verbose)
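# Illustrative usage sketch (the values are made up; with src=None a surface
# SourceEstimate is built directly from the label vertices):
#
#     labels = read_labels_from_annot('fsaverage', parc='aparc')
#     values = np.arange(len(labels), dtype=float)  # one value per label
#     stc = labels_to_stc(labels, values)
#     # vertices covered by several labels receive the average of their values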
def _check_values_labels(values, n_labels):
if n_labels != len(values):
raise ValueError(
f'values.shape[0] ({values.shape[0]}) must match the number of '
f'labels ({n_labels})')
def _labels_to_stc_surf(labels, values, tmin, tstep, subject):
from scipy import sparse
subject = _check_labels_subject(labels, subject, 'subject')
_check_values_labels(values, len(labels))
vertices = dict(lh=[], rh=[])
data = dict(lh=[], rh=[])
for li, label in enumerate(labels):
data[label.hemi].append(
np.repeat(values[li][np.newaxis], len(label.vertices), axis=0))
vertices[label.hemi].append(label.vertices)
hemis = ('lh', 'rh')
for hemi in hemis:
vertices[hemi] = np.concatenate(vertices[hemi], axis=0)
data[hemi] = np.concatenate(data[hemi], axis=0).astype(float)
cols = np.arange(len(vertices[hemi]))
vertices[hemi], rows = np.unique(vertices[hemi], return_inverse=True)
mat = sparse.coo_matrix((np.ones(len(rows)), (rows, cols))).tocsr()
mat = mat * sparse.diags(1. / np.asarray(mat.sum(axis=-1))[:, 0])
data[hemi] = mat.dot(data[hemi])
vertices = [vertices[hemi] for hemi in hemis]
data = np.concatenate([data[hemi] for hemi in hemis], axis=0)
return data, vertices, subject
_DEFAULT_TABLE_NAME = 'MNE-Python Colortable'
def _write_annot(fname, annot, ctab, names, table_name=_DEFAULT_TABLE_NAME):
"""Write a Freesurfer annotation to a .annot file."""
assert len(names) == len(ctab)
with open(fname, 'wb') as fid:
n_verts = len(annot)
np.array(n_verts, dtype='>i4').tofile(fid)
data = np.zeros((n_verts, 2), dtype='>i4')
data[:, 0] = np.arange(n_verts)
data[:, 1] = annot
data.ravel().tofile(fid)
# indicate that color table exists
np.array(1, dtype='>i4').tofile(fid)
# color table version 2
np.array(-2, dtype='>i4').tofile(fid)
# write color table
n_entries = len(ctab)
np.array(n_entries, dtype='>i4').tofile(fid)
# write our color table name
_write_annot_str(fid, table_name)
# number of entries to write
np.array(n_entries, dtype='>i4').tofile(fid)
# write entries
for ii, (name, color) in enumerate(zip(names, ctab)):
np.array(ii, dtype='>i4').tofile(fid)
_write_annot_str(fid, name)
np.array(color[:4], dtype='>i4').tofile(fid)
def _write_annot_str(fid, s):
s = s.encode('ascii') + b'\x00'
np.array(len(s), '>i4').tofile(fid)
fid.write(s)
@verbose
def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
subjects_dir=None, annot_fname=None,
colormap='hsv', hemi='both', sort=True,
table_name=_DEFAULT_TABLE_NAME, verbose=None):
r"""Create a FreeSurfer annotation from a list of labels.
Parameters
----------
labels : list with instances of mne.Label
The labels to create a parcellation from.
subject : str | None
The subject for which to write the parcellation.
parc : str | None
The parcellation name to use.
overwrite : bool
Overwrite files if they already exist.
%(subjects_dir)s
annot_fname : str | None
Filename of the .annot file. If not None, only this file is written
and 'parc' and 'subject' are ignored.
colormap : str
Colormap to use to generate label colors for labels that do not
have a color specified.
hemi : 'both' | 'lh' | 'rh'
The hemisphere(s) for which to write \*.annot files (only applies if
annot_fname is not specified; default is 'both').
sort : bool
If True (default), labels will be sorted by name before writing.
.. versionadded:: 0.21.0
table_name : str
The table name to use for the colortable.
.. versionadded:: 0.21.0
%(verbose)s
See Also
--------
read_labels_from_annot
Notes
-----
Vertices that are not covered by any of the labels are assigned to a label
named "unknown".
"""
logger.info('Writing labels to parcellation...')
subjects_dir = get_subjects_dir(subjects_dir)
# get the .annot filenames and hemispheres
annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
subjects_dir)
if not overwrite:
for fname in annot_fname:
if op.exists(fname):
raise ValueError('File %s exists. Use "overwrite=True" to '
'overwrite it' % fname)
# prepare container for data to save:
to_save = []
# keep track of issues found in the labels
duplicate_colors = []
invalid_colors = []
overlap = []
no_color = (-1, -1, -1, -1)
no_color_rgb = (-1, -1, -1)
for hemi, fname in zip(hemis, annot_fname):
hemi_labels = [label for label in labels if label.hemi == hemi]
n_hemi_labels = len(hemi_labels)
if n_hemi_labels == 0:
ctab = np.empty((0, 4), dtype=np.int32)
ctab_rgb = ctab[:, :3]
else:
if sort:
hemi_labels.sort(key=lambda label: label.name)
# convert colors to 0-255 RGBA tuples
hemi_colors = [no_color if label.color is None else
tuple(int(round(255 * i)) for i in label.color)
for label in hemi_labels]
ctab = np.array(hemi_colors, dtype=np.int32)
ctab_rgb = ctab[:, :3]
# make color dict (for annot ID, only R, G and B count)
labels_by_color = defaultdict(list)
for label, color in zip(hemi_labels, ctab_rgb):
labels_by_color[tuple(color)].append(label.name)
# check label colors
for color, names in labels_by_color.items():
if color == no_color_rgb:
continue
if color == (0, 0, 0):
                # we cannot have an all-zero color, otherwise e.g. tksurfer
# refuses to read the parcellation
warn('At least one label contains a color with, "r=0, '
'g=0, b=0" value. Some FreeSurfer tools may fail '
'to read the parcellation')
if any(i > 255 for i in color):
msg = ("%s: %s (%s)" % (color, ', '.join(names), hemi))
invalid_colors.append(msg)
if len(names) > 1:
msg = "%s: %s (%s)" % (color, ', '.join(names), hemi)
duplicate_colors.append(msg)
# replace None values (labels with unspecified color)
if labels_by_color[no_color_rgb]:
default_colors = _n_colors(n_hemi_labels, bytes_=True,
cmap=colormap)
# keep track of colors known to be in hemi_colors :
safe_color_i = 0
for i in range(n_hemi_labels):
if ctab[i, 0] == -1:
color = default_colors[i]
                    # make sure not to add a duplicate color
while np.any(np.all(color[:3] == ctab_rgb, 1)):
color = default_colors[safe_color_i]
safe_color_i += 1
# assign the color
ctab[i] = color
# find number of vertices in surface
if subject is not None and subjects_dir is not None:
fpath = op.join(subjects_dir, subject, 'surf', '%s.white' % hemi)
points, _ = read_surface(fpath)
n_vertices = len(points)
else:
if len(hemi_labels) > 0:
max_vert = max(np.max(label.vertices) for label in hemi_labels)
n_vertices = max_vert + 1
else:
n_vertices = 1
warn('Number of vertices in the surface could not be '
'verified because the surface file could not be found; '
'specify subject and subjects_dir parameters.')
# Create annot and color table array to write
annot = np.empty(n_vertices, dtype=np.int64)
annot[:] = -1
# create the annotation ids from the colors
annot_id_coding = np.array((1, 2 ** 8, 2 ** 16))
annot_ids = list(np.sum(ctab_rgb * annot_id_coding, axis=1))
for label, annot_id in zip(hemi_labels, annot_ids):
# make sure the label is not overwriting another label
if np.any(annot[label.vertices] != -1):
other_ids = set(annot[label.vertices])
other_ids.discard(-1)
other_indices = (annot_ids.index(i) for i in other_ids)
other_names = (hemi_labels[i].name for i in other_indices)
other_repr = ', '.join(other_names)
msg = "%s: %s overlaps %s" % (hemi, label.name, other_repr)
overlap.append(msg)
annot[label.vertices] = annot_id
hemi_names = [label.name for label in hemi_labels]
if None in hemi_names:
            msg = ("Found %i labels with no name. Writing annotation file "
                   "requires all labels named" % (hemi_names.count(None)))
# raise the error immediately rather than crash with an
# uninformative error later (e.g. cannot join NoneType)
raise ValueError(msg)
# Assign unlabeled vertices to an "unknown" label
unlabeled = (annot == -1)
if np.any(unlabeled):
msg = ("Assigning %i unlabeled vertices to "
"'unknown-%s'" % (unlabeled.sum(), hemi))
logger.info(msg)
# find an unused color (try shades of gray first)
for i in range(1, 257):
if not np.any(np.all((i, i, i) == ctab_rgb, 1)):
break
if i < 256:
color = (i, i, i, 0)
else:
err = ("Need one free shade of gray for 'unknown' label. "
"Please modify your label colors, or assign the "
"unlabeled vertices to another label.")
raise ValueError(err)
# find the id
annot_id = np.sum(annot_id_coding * color[:3])
# update data to write
annot[unlabeled] = annot_id
ctab = np.vstack((ctab, color))
hemi_names.append("unknown")
# convert to FreeSurfer alpha values
ctab[:, 3] = 255 - ctab[:, 3]
# remove hemi ending in names
hemi_names = [name[:-3] if name.endswith(hemi) else name
for name in hemi_names]
to_save.append((fname, annot, ctab, hemi_names))
issues = []
if duplicate_colors:
msg = ("Some labels have the same color values (all labels in one "
"hemisphere must have a unique color):")
duplicate_colors.insert(0, msg)
issues.append('\n'.join(duplicate_colors))
if invalid_colors:
msg = ("Some labels have invalid color values (all colors should be "
"RGBA tuples with values between 0 and 1)")
invalid_colors.insert(0, msg)
issues.append('\n'.join(invalid_colors))
if overlap:
msg = ("Some labels occupy vertices that are also occupied by one or "
"more other labels. Each vertex can only be occupied by a "
"single label in *.annot files.")
overlap.insert(0, msg)
issues.append('\n'.join(overlap))
if issues:
raise ValueError('\n\n'.join(issues))
# write it
for fname, annot, ctab, hemi_names in to_save:
logger.info(' writing %d labels to %s' % (len(hemi_names), fname))
_write_annot(fname, annot, ctab, hemi_names, table_name)
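# Illustrative round-trip sketch (hypothetical parcellation name; the labels
# must not overlap and must have unique colors per hemisphere, as checked
# above):
#
#     labels = read_labels_from_annot('fsaverage', parc='aparc')
#     write_labels_to_annot(labels, subject='fsaverage', parc='aparc_copy',
#                           overwrite=True)
#     # vertices not covered by any label end up in an 'unknown' label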
@fill_doc
def select_sources(subject, label, location='center', extent=0.,
grow_outside=True, subjects_dir=None, name=None,
random_state=None, surf='white'):
"""Select sources from a label.
Parameters
----------
%(subject)s
label : instance of Label | str
Define where the seed will be chosen. If str, can be 'lh' or 'rh',
which correspond to left or right hemisphere, respectively.
location : 'random' | 'center' | int
Location to grow label from. If the location is an int, it represents
the vertex number in the corresponding label. If it is a str, it can be
either 'random' or 'center'.
extent : float
Extents (radius in mm) of the labels, i.e. maximum geodesic distance
on the white matter surface from the seed. If 0, the resulting label
will contain only one vertex.
grow_outside : bool
Let the region grow outside the original label where location was
defined.
%(subjects_dir)s
name : None | str
Assign name to the new label.
%(random_state)s
surf : str
        The surface used to simulate the label, defaults to the white surface.
Returns
-------
label : instance of Label
The label that contains the selected sources.
Notes
-----
This function selects a region of interest on the cortical surface based
on a label (or a hemisphere). The sources are selected by growing a region
around a seed which is selected randomly, is the center of the label, or
is a specific vertex. The selected vertices can extend beyond the initial
provided label. This can be prevented by setting grow_outside to False.
The selected sources are returned in the form of a new Label object. The
values of the label contain the distance from the seed in millimeters.
.. versionadded:: 0.18
"""
# If label is a string, convert it to a label that contains the whole
# hemisphere.
if isinstance(label, str):
_check_option('label', label, ['lh', 'rh'])
surf_filename = op.join(subjects_dir, subject, 'surf',
label + '.white')
vertices, _ = read_surface(surf_filename)
indices = np.arange(len(vertices), dtype=int)
label = Label(indices, vertices, hemi=label)
# Choose the seed according to the selected strategy.
if isinstance(location, str):
_check_option('location', location, ['center', 'random'])
if location == 'center':
seed = label.center_of_mass(
subject, restrict_vertices=True, subjects_dir=subjects_dir,
surf=surf)
else:
rng = check_random_state(random_state)
seed = rng.choice(label.vertices)
else:
seed = label.vertices[location]
hemi = 0 if label.hemi == 'lh' else 1
new_label = grow_labels(subject, seed, extent, hemi, subjects_dir)[0]
    # We override the name because grow_labels automatically adds a -rh or -lh
# to the given parameter.
new_label.name = name
# Restrict the new label to the vertices of the input label if needed.
if not grow_outside:
to_keep = np.array([v in label.vertices for v in new_label.vertices])
new_label = Label(new_label.vertices[to_keep], new_label.pos[to_keep],
hemi=new_label.hemi, name=name, subject=subject)
return new_label
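# Illustrative usage sketch (hypothetical subject and extent; requires the
# subject's white surface under SUBJECTS_DIR):
#
#     roi = select_sources('fsaverage', label='lh', location='random',
#                          extent=10., name='sim_source', random_state=0)
#     # a label grown ~10 mm around a randomly chosen left-hemisphere seed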
def find_pos_in_annot(pos, subject='fsaverage', annot='aparc+aseg',
subjects_dir=None):
"""
Find name in atlas for given MRI coordinates.
Parameters
----------
pos : ndarray, shape (3,)
Vector of x,y,z coordinates in MRI space.
subject : str
MRI subject name.
annot : str
MRI volumetric atlas file name. Do not include the ``.mgz`` suffix.
subjects_dir : path-like
Path to MRI subjects directory.
Returns
-------
label : str
Anatomical region name from atlas.
Notes
-----
.. versionadded:: 0.24
"""
pos = np.asarray(pos, float)
if pos.shape != (3,):
raise ValueError(
'pos must be an array of shape (3,), ' f'got {pos.shape}')
nibabel = _import_nibabel('read MRI parcellations')
if subjects_dir is None:
subjects_dir = get_subjects_dir(None)
atlas_fname = os.path.join(subjects_dir, subject, 'mri', annot + '.mgz')
parcellation_img = nibabel.load(atlas_fname)
    # Load the FreeSurfer atlas LUT
lut_inv_dict = read_freesurfer_lut()[0]
label_lut = {v: k for k, v in lut_inv_dict.items()}
# Find voxel for dipole position
mri_vox_t = np.linalg.inv(parcellation_img.header.get_vox2ras_tkr())
vox_dip_pos_f = apply_trans(mri_vox_t, pos)
vox_dip_pos = np.rint(vox_dip_pos_f).astype(int)
# Get voxel value and label from LUT
vol_values = parcellation_img.get_fdata()[tuple(vox_dip_pos.T)]
label = label_lut.get(vol_values, 'Unknown')
return label
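# Illustrative usage sketch (the coordinates are made up and given in
# FreeSurfer surface RAS millimeters, the frame implied by the vox2ras_tkr
# transform used above; requires nibabel and the subject's aparc+aseg.mgz):
#
#     region = find_pos_in_annot([-30., 20., 40.], subject='fsaverage',
#                                annot='aparc+aseg')
#     print(region)  # an anatomical region name, or 'Unknown'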
|
PetePriority/home-assistant
|
refs/heads/dev
|
homeassistant/components/maxcube/binary_sensor.py
|
1
|
"""
Support for MAX! Window Shutter via MAX! Cube.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/maxcube/
"""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.maxcube import DATA_KEY
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Iterate through all MAX! Devices and add window shutters."""
devices = []
for handler in hass.data[DATA_KEY].values():
cube = handler.cube
for device in cube.devices:
name = "{} {}".format(
cube.room_by_id(device.room_id).name, device.name)
# Only add Window Shutters
if cube.is_windowshutter(device):
devices.append(
MaxCubeShutter(handler, name, device.rf_address))
if devices:
add_entities(devices)
class MaxCubeShutter(BinarySensorDevice):
"""Representation of a MAX! Cube Binary Sensor device."""
def __init__(self, handler, name, rf_address):
"""Initialize MAX! Cube BinarySensorDevice."""
self._name = name
self._sensor_type = 'window'
self._rf_address = rf_address
self._cubehandle = handler
self._state = None
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the BinarySensorDevice."""
return self._name
@property
def device_class(self):
"""Return the class of this sensor."""
return self._sensor_type
@property
def is_on(self):
"""Return true if the binary sensor is on/open."""
return self._state
def update(self):
"""Get latest data from MAX! Cube."""
self._cubehandle.update()
device = self._cubehandle.cube.device_by_rf(self._rf_address)
self._state = device.is_open
|
DonBeo/statsmodels
|
refs/heads/master
|
statsmodels/datasets/anes96/data.py
|
25
|
"""American National Election Survey 1996"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
http://www.electionstudies.org/
The American National Election Studies.
"""
DESCRSHORT = """This data is a subset of the American National Election Studies of 1996."""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of observations - 944
Number of variables - 10
Variables name definitions::
popul - Census place population in 1000s
TVnews - Number of times per week that respondent watches TV news.
PID - Party identification of respondent.
0 - Strong Democrat
1 - Weak Democrat
2 - Independent-Democrat
            3 - Independent-Independent
4 - Independent-Republican
5 - Weak Republican
6 - Strong Republican
        age - Age of respondent.
educ - Education level of respondent
1 - 1-8 grades
2 - Some high school
3 - High school graduate
4 - Some college
5 - College degree
6 - Master's degree
7 - PhD
income - Income of household
1 - None or less than $2,999
2 - $3,000-$4,999
3 - $5,000-$6,999
4 - $7,000-$8,999
5 - $9,000-$9,999
6 - $10,000-$10,999
7 - $11,000-$11,999
8 - $12,000-$12,999
9 - $13,000-$13,999
            10 - $14,000-$14,999
11 - $15,000-$16,999
12 - $17,000-$19,999
13 - $20,000-$21,999
14 - $22,000-$24,999
15 - $25,000-$29,999
16 - $30,000-$34,999
17 - $35,000-$39,999
18 - $40,000-$44,999
19 - $45,000-$49,999
20 - $50,000-$59,999
21 - $60,000-$74,999
            22 - $75,000-$89,999
23 - $90,000-$104,999
24 - $105,000 and over
vote - Expected vote
0 - Clinton
1 - Dole
The following 3 variables all take the values:
1 - Extremely liberal
2 - Liberal
3 - Slightly liberal
4 - Moderate
5 - Slightly conservative
6 - Conservative
7 - Extremely Conservative
selfLR - Respondent's self-reported political leanings from "Left"
to "Right".
        ClinLR - Respondent's impression of Bill Clinton's political
            leanings from "Left" to "Right".
        DoleLR - Respondent's impression of Bob Dole's political leanings
            from "Left" to "Right".
logpopul - log(popul + .1)
"""
from numpy import recfromtxt, column_stack, array, log
import numpy.lib.recfunctions as nprf
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""Load the anes96 data and returns a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=5, exog_idx=[10,2,6,7,8],
dtype=float)
def load_pandas():
"""Load the anes96 data and returns a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=5, exog_idx=[10,2,6,7,8],
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/anes96.csv',"rb"), delimiter="\t",
names = True, dtype=float)
logpopul = log(data['popul'] + .1)
data = nprf.append_fields(data, 'logpopul', logpopul, usemask=False,
asrecarray=True)
return data
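# Illustrative usage sketch (assumes a statsmodels install that ships this
# dataset module):
#
#     from statsmodels.datasets import anes96
#     data = anes96.load_pandas()
#     data.endog.head()   # party identification (PID), per endog_idx=5 above
#     data.exog.columns   # the regressors selected by exog_idx above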
|
llevar/germline-regenotyper
|
refs/heads/master
|
examples/scripts/prepare_freebayes_genotyping_config.py
|
3
|
#!/usr/bin/env python
import sys
import os
import uuid
from time import sleep
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from sqlalchemy import or_, and_
import json
import logging
import argparse
import tracker.model.analysis
import tracker.model.analysis_run
import tracker.model.configuration
from tracker.util import connection
def parse_args():
my_parser = argparse.ArgumentParser()
sub_parsers = my_parser.add_subparsers()
create_configs_parser = sub_parsers.add_parser("create-configs", conflict_handler='resolve')
create_configs_parser.add_argument("-a", "--analysis_id", help="ID of the analysis to run.", dest="analysis_id", required=True)
create_configs_parser.add_argument("-n", "--num_runs", help="Number of runs to create configurations for.", dest="num_runs", required=True, type=int)
create_configs_parser.add_argument("-t", "--tissue_type", help="Tumor or normal tissue", dest="tissue_type", choices = ["tumor", "normal"], required=True)
create_configs_parser.add_argument("-c", "--config_location", help="Path to a directory where the generated config files will be stored.", dest="config_location", required=True)
create_configs_parser.set_defaults(func=create_configs_command)
my_args = my_parser.parse_args()
return my_args
def get_available_samples(analysis_id, tissue_type, num_runs):
#PCAWG Samples are in their own database
Base = automap_base()
sample_engine = create_engine('PUT_DB_URL_HERE')
Base.prepare(sample_engine, reflect=True)
PCAWGSample = Base.classes.pcawg_samples
SampleLocation = Base.classes.sample_locations
sample_session = Session(sample_engine)
#Butler run tracking is in its own database
Analysis = connection.Base.classes.analysis
AnalysisRun = connection.Base.classes.analysis_run
Configuration = connection.Base.classes.configuration
run_session = connection.Session()
if tissue_type == "normal":
sample_id = PCAWGSample.normal_wgs_alignment_gnos_id
sample_location = SampleLocation.normal_sample_location
else:
sample_id = PCAWGSample.tumor_wgs_alignment_gnos_id
sample_location = SampleLocation.tumor_sample_location
current_runs = run_session.query(Configuration.config[("sample"," sample_id")].astext).\
join(AnalysisRun, AnalysisRun.config_id == Configuration.config_id).\
join(Analysis, Analysis.analysis_id == AnalysisRun.analysis_id).\
filter(and_(Analysis.analysis_id == analysis_id, AnalysisRun.run_status != tracker.model.analysis_run.RUN_STATUS_ERROR)).all()
available_samples = sample_session.query(PCAWGSample.index.label("index"), sample_id.label("sample_id"), sample_location.label("sample_location")).\
join(SampleLocation, PCAWGSample.index == SampleLocation.donor_index).\
filter(and_(sample_location != None, sample_id.notin_(current_runs))).\
limit(num_runs).all()
run_session.close()
connection.engine.dispose()
sample_session.close()
sample_engine.dispose()
return available_samples, len(available_samples)
def write_config_to_file(config, config_location):
run_uuid = str(uuid.uuid4())
my_file = open("{}/{}.json".format(config_location, run_uuid), "w")
json.dump(config, my_file)
my_file.close()
def generate_config_objects(available_samples, num_runs, config_location):
for this_run in range(num_runs):
this_config_data = {"sample": {
"donor_index": available_samples[this_run].index,
"sample_id": available_samples[this_run].sample_id.split(",")[0],
"sample_location": available_samples[this_run].sample_location
}
}
yield this_config_data
def create_configs_command(args):
analysis_id = args.analysis_id
num_runs = args.num_runs
tissue_type = args.tissue_type
config_location = args.config_location
available_samples, num_available_samples = get_available_samples(analysis_id, tissue_type, num_runs)
if num_available_samples < num_runs:
print "Only found {} available samples to run. Will create {} run configurations.".format(str(num_available_samples), str(num_available_samples))
num_runs = num_available_samples
if (not os.path.isdir(config_location)):
os.makedirs(config_location)
for config in generate_config_objects(available_samples, num_runs, config_location):
write_config_to_file(config, config_location)
if __name__ == '__main__':
args = parse_args()
args.func(args)
|
yoshinorim/mysql-5.6
|
refs/heads/fb-mysql-5.6.35
|
xtrabackup/test/python/testtools/tests/test_fixturesupport.py
|
42
|
# Copyright (c) 2010 testtools developers. See LICENSE for details.
import unittest
from testtools import (
TestCase,
content,
content_type,
)
from testtools.helpers import try_import
from testtools.tests.helpers import (
ExtendedTestResult,
)
fixtures = try_import('fixtures')
LoggingFixture = try_import('fixtures.tests.helpers.LoggingFixture')
class TestFixtureSupport(TestCase):
def setUp(self):
super(TestFixtureSupport, self).setUp()
if fixtures is None or LoggingFixture is None:
self.skipTest("Need fixtures")
def test_useFixture(self):
fixture = LoggingFixture()
class SimpleTest(TestCase):
def test_foo(self):
self.useFixture(fixture)
result = unittest.TestResult()
SimpleTest('test_foo').run(result)
self.assertTrue(result.wasSuccessful())
self.assertEqual(['setUp', 'cleanUp'], fixture.calls)
def test_useFixture_cleanups_raise_caught(self):
calls = []
def raiser(ignored):
calls.append('called')
raise Exception('foo')
fixture = fixtures.FunctionFixture(lambda:None, raiser)
class SimpleTest(TestCase):
def test_foo(self):
self.useFixture(fixture)
result = unittest.TestResult()
SimpleTest('test_foo').run(result)
self.assertFalse(result.wasSuccessful())
self.assertEqual(['called'], calls)
def test_useFixture_details_captured(self):
class DetailsFixture(fixtures.Fixture):
def setUp(self):
fixtures.Fixture.setUp(self)
self.addCleanup(delattr, self, 'content')
self.content = ['content available until cleanUp']
self.addDetail('content',
content.Content(content_type.UTF8_TEXT, self.get_content))
def get_content(self):
return self.content
fixture = DetailsFixture()
class SimpleTest(TestCase):
def test_foo(self):
self.useFixture(fixture)
# Add a colliding detail (both should show up)
self.addDetail('content',
content.Content(content_type.UTF8_TEXT, lambda:['foo']))
result = ExtendedTestResult()
SimpleTest('test_foo').run(result)
self.assertEqual('addSuccess', result._events[-2][0])
details = result._events[-2][2]
self.assertEqual(['content', 'content-1'], sorted(details.keys()))
self.assertEqual('foo', ''.join(details['content'].iter_text()))
self.assertEqual('content available until cleanUp',
''.join(details['content-1'].iter_text()))
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
|
dpodder/coreclr
|
refs/heads/master
|
src/scripts/genEventPipe.py
|
3
|
from __future__ import print_function
from genXplatEventing import *
from genXplatLttng import *
import os
import xml.dom.minidom as DOM
stdprolog = """// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/******************************************************************
DO NOT MODIFY. AUTOGENERATED FILE.
This file is generated using the logic from <root>/src/scripts/genEventPipe.py
******************************************************************/
"""
stdprolog_cmake = """#
#
#******************************************************************
#DO NOT MODIFY. AUTOGENERATED FILE.
#This file is generated using the logic from <root>/src/scripts/genEventPipe.py
#******************************************************************
"""
def generateClrEventPipeWriteEventsImpl(
providerName, eventNodes, allTemplates, exclusionListFile):
providerPrettyName = providerName.replace("Windows-", '')
providerPrettyName = providerPrettyName.replace("Microsoft-", '')
providerPrettyName = providerPrettyName.replace('-', '_')
WriteEventImpl = []
# EventPipeEvent declaration
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
WriteEventImpl.append(
"EventPipeEvent *EventPipeEvent" +
eventName +
" = nullptr;\n")
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
# generate EventPipeEventEnabled function
eventEnabledImpl = """bool EventPipeEventEnabled%s()
{
return EventPipeEvent%s->IsEnabled();
}
""" % (eventName, eventName)
WriteEventImpl.append(eventEnabledImpl)
# generate EventPipeWriteEvent function
fnptype = []
linefnptype = []
fnptype.append("extern \"C\" ULONG EventPipeWriteEvent")
fnptype.append(eventName)
fnptype.append("(\n")
if templateName:
template = allTemplates[templateName]
else:
template = None
if template:
fnSig = template.signature
for paramName in fnSig.paramlist:
fnparam = fnSig.getParam(paramName)
wintypeName = fnparam.winType
typewName = palDataTypeMapping[wintypeName]
winCount = fnparam.count
countw = palDataTypeMapping[winCount]
if paramName in template.structs:
linefnptype.append(
"%sint %s_ElementSize,\n" %
(lindent, paramName))
linefnptype.append(lindent)
linefnptype.append(typewName)
if countw != " ":
linefnptype.append(countw)
linefnptype.append(" ")
linefnptype.append(fnparam.name)
linefnptype.append(",\n")
if len(linefnptype) > 0:
del linefnptype[-1]
fnptype.extend(linefnptype)
fnptype.append(")\n{\n")
checking = """ if (!EventPipeEventEnabled%s())
return ERROR_SUCCESS;
""" % (eventName)
fnptype.append(checking)
WriteEventImpl.extend(fnptype)
if template:
body = generateWriteEventBody(template, providerName, eventName)
WriteEventImpl.append(body)
else:
WriteEventImpl.append(
" EventPipe::WriteEvent(*EventPipeEvent" +
eventName +
", (BYTE*) nullptr, 0);\n")
WriteEventImpl.append("\n return ERROR_SUCCESS;\n}\n\n")
# EventPipeProvider and EventPipeEvent initialization
WriteEventImpl.append(
"extern \"C\" void Init" +
providerPrettyName +
"()\n{\n")
WriteEventImpl.append(
" EventPipeProvider" +
providerPrettyName +
" = EventPipe::CreateProvider(SL(" +
providerPrettyName +
"Name));\n")
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
eventKeywords = eventNode.getAttribute('keywords')
eventKeywordsMask = generateEventKeywords(eventKeywords)
eventValue = eventNode.getAttribute('value')
eventVersion = eventNode.getAttribute('version')
eventLevel = eventNode.getAttribute('level')
eventLevel = eventLevel.replace("win:", "EventPipeEventLevel::")
exclusionInfo = parseExclusionList(exclusionListFile)
taskName = eventNode.getAttribute('task')
initEvent = """ EventPipeEvent%s = EventPipeProvider%s->AddEvent(%s,%s,%s,%s);
""" % (eventName, providerPrettyName, eventValue, eventKeywordsMask, eventVersion, eventLevel)
WriteEventImpl.append(initEvent)
WriteEventImpl.append("}")
return ''.join(WriteEventImpl)
def generateWriteEventBody(template, providerName, eventName):
header = """
char stackBuffer[%s];
char *buffer = stackBuffer;
unsigned int offset = 0;
unsigned int size = %s;
bool fixedBuffer = true;
bool success = true;
""" % (template.estimated_size, template.estimated_size)
fnSig = template.signature
pack_list = []
for paramName in fnSig.paramlist:
parameter = fnSig.getParam(paramName)
if paramName in template.structs:
size = "(int)%s_ElementSize * (int)%s" % (
paramName, parameter.prop)
if template.name in specialCaseSizes and paramName in specialCaseSizes[template.name]:
size = "(int)(%s)" % specialCaseSizes[template.name][paramName]
pack_list.append(
" success &= WriteToBuffer((const BYTE *)%s, %s, buffer, offset, size, fixedBuffer);" %
(paramName, size))
elif paramName in template.arrays:
size = "sizeof(%s) * (int)%s" % (
lttngDataTypeMapping[parameter.winType],
parameter.prop)
if template.name in specialCaseSizes and paramName in specialCaseSizes[template.name]:
size = "(int)(%s)" % specialCaseSizes[template.name][paramName]
pack_list.append(
" success &= WriteToBuffer((const BYTE *)%s, %s, buffer, offset, size, fixedBuffer);" %
(paramName, size))
elif parameter.winType == "win:GUID":
pack_list.append(
" success &= WriteToBuffer(*%s, buffer, offset, size, fixedBuffer);" %
(parameter.name,))
else:
pack_list.append(
" success &= WriteToBuffer(%s, buffer, offset, size, fixedBuffer);" %
(parameter.name,))
code = "\n".join(pack_list) + "\n\n"
checking = """ if (!success)
{
if (!fixedBuffer)
delete[] buffer;
return ERROR_WRITE_FAULT;
}\n\n"""
body = " EventPipe::WriteEvent(*EventPipeEvent" + \
eventName + ", (BYTE *)buffer, size);\n"
footer = """
if (!fixedBuffer)
delete[] buffer;
"""
return header + code + checking + body + footer
keywordMap = {}
def generateEventKeywords(eventKeywords):
mask = 0
# split keywords if there are multiple
allKeywords = eventKeywords.split()
for singleKeyword in allKeywords:
mask = mask | keywordMap[singleKeyword]
return mask
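# Worked sketch of the keyword masking above (the keyword names and mask
# values here are hypothetical; the real ones are read from the ETW manifest
# into keywordMap by generateEventPipeFiles below):
#
#     keywordMap = {"GCKeyword": 0x1, "LoaderKeyword": 0x8}
#     generateEventKeywords("GCKeyword LoaderKeyword")  # -> 0x9 (0x1 | 0x8)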
def generateEventPipeCmakeFile(etwmanifest, eventpipe_directory):
tree = DOM.parse(etwmanifest)
with open(eventpipe_directory + "CMakeLists.txt", 'w') as topCmake:
topCmake.write(stdprolog_cmake + "\n")
topCmake.write("""cmake_minimum_required(VERSION 2.8.12.2)
project(eventpipe)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
include_directories(${CLR_DIR}/src/vm)
add_library(eventpipe
STATIC\n""")
for providerNode in tree.getElementsByTagName('provider'):
providerName = providerNode.getAttribute('name')
providerName = providerName.replace("Windows-", '')
providerName = providerName.replace("Microsoft-", '')
providerName_File = providerName.replace('-', '')
providerName_File = providerName_File.lower()
topCmake.write(' "%s.cpp"\n' % (providerName_File))
topCmake.write(' "eventpipehelpers.cpp"\n')
topCmake.write(""" )
add_dependencies(eventpipe GeneratedEventingFiles)
# Install the static eventpipe library
install(TARGETS eventpipe DESTINATION lib)
""")
topCmake.close()
def generateEventPipeHelperFile(etwmanifest, eventpipe_directory):
with open(eventpipe_directory + "eventpipehelpers.cpp", 'w') as helper:
helper.write(stdprolog)
helper.write("""
#include "stdlib.h"
bool ResizeBuffer(char *&buffer, unsigned int& size, unsigned int currLen, unsigned int newSize, bool &fixedBuffer)
{
newSize *= 1.5;
_ASSERTE(newSize > size); // check for overflow
if (newSize < 32)
newSize = 32;
char *newBuffer = new char[newSize];
memcpy(newBuffer, buffer, currLen);
if (!fixedBuffer)
delete[] buffer;
buffer = newBuffer;
size = newSize;
fixedBuffer = false;
return true;
}
bool WriteToBuffer(const BYTE *src, unsigned int len, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer)
{
if(!src) return true;
if (offset + len > size)
{
if (!ResizeBuffer(buffer, size, offset, size + len, fixedBuffer))
return false;
}
memcpy(buffer + offset, src, len);
offset += len;
return true;
}
bool WriteToBuffer(PCWSTR str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer)
{
if(!str) return true;
unsigned int byteCount = (PAL_wcslen(str) + 1) * sizeof(*str);
if (offset + byteCount > size)
{
if (!ResizeBuffer(buffer, size, offset, size + byteCount, fixedBuffer))
return false;
}
memcpy(buffer + offset, str, byteCount);
offset += byteCount;
return true;
}
bool WriteToBuffer(const char *str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer)
{
if(!str) return true;
unsigned int len = strlen(str) + 1;
if (offset + len > size)
{
if (!ResizeBuffer(buffer, size, offset, size + len, fixedBuffer))
return false;
}
memcpy(buffer + offset, str, len);
offset += len;
return true;
}
""")
tree = DOM.parse(etwmanifest)
for providerNode in tree.getElementsByTagName('provider'):
providerName = providerNode.getAttribute('name')
providerPrettyName = providerName.replace("Windows-", '')
providerPrettyName = providerPrettyName.replace("Microsoft-", '')
providerPrettyName = providerPrettyName.replace('-', '_')
helper.write(
"extern \"C\" void Init" +
providerPrettyName +
"();\n\n")
helper.write("extern \"C\" void InitProvidersAndEvents()\n{\n")
for providerNode in tree.getElementsByTagName('provider'):
providerName = providerNode.getAttribute('name')
providerPrettyName = providerName.replace("Windows-", '')
providerPrettyName = providerPrettyName.replace("Microsoft-", '')
providerPrettyName = providerPrettyName.replace('-', '_')
helper.write(" Init" + providerPrettyName + "();\n")
helper.write("}")
helper.close()
def generateEventPipeImplFiles(
etwmanifest, eventpipe_directory, exclusionListFile):
tree = DOM.parse(etwmanifest)
coreclrRoot = os.getcwd()
for providerNode in tree.getElementsByTagName('provider'):
providerName = providerNode.getAttribute('name')
providerPrettyName = providerName.replace("Windows-", '')
providerPrettyName = providerPrettyName.replace("Microsoft-", '')
providerName_File = providerPrettyName.replace('-', '')
providerName_File = providerName_File.lower()
providerPrettyName = providerPrettyName.replace('-', '_')
eventpipefile = eventpipe_directory + providerName_File + ".cpp"
eventpipeImpl = open(eventpipefile, 'w')
eventpipeImpl.write(stdprolog)
header = """
#include \"%s/src/vm/common.h\"
#include \"%s/src/vm/eventpipeprovider.h\"
#include \"%s/src/vm/eventpipeevent.h\"
#include \"%s/src/vm/eventpipe.h\"
bool ResizeBuffer(char *&buffer, unsigned int& size, unsigned int currLen, unsigned int newSize, bool &fixedBuffer);
bool WriteToBuffer(PCWSTR str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer);
bool WriteToBuffer(const char *str, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer);
bool WriteToBuffer(const BYTE *src, unsigned int len, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer);
template <typename T>
bool WriteToBuffer(const T &value, char *&buffer, unsigned int& offset, unsigned int& size, bool &fixedBuffer)
{
if (sizeof(T) + offset > size)
{
if (!ResizeBuffer(buffer, size, offset, size + sizeof(T), fixedBuffer))
return false;
}
*(T *)(buffer + offset) = value;
offset += sizeof(T);
return true;
}
""" % (coreclrRoot, coreclrRoot, coreclrRoot, coreclrRoot)
eventpipeImpl.write(header)
eventpipeImpl.write(
"const WCHAR* %sName = W(\"%s\");\n" % (
providerPrettyName,
providerName
)
)
eventpipeImpl.write(
"EventPipeProvider *EventPipeProvider%s = nullptr;\n" % (
providerPrettyName,
)
)
templateNodes = providerNode.getElementsByTagName('template')
allTemplates = parseTemplateNodes(templateNodes)
eventNodes = providerNode.getElementsByTagName('event')
eventpipeImpl.write(
generateClrEventPipeWriteEventsImpl(
providerName,
eventNodes,
allTemplates,
exclusionListFile) + "\n")
eventpipeImpl.close()
def generateEventPipeFiles(
etwmanifest, eventpipe_directory, exclusionListFile):
eventpipe_directory = eventpipe_directory + "/"
tree = DOM.parse(etwmanifest)
if not os.path.exists(eventpipe_directory):
os.makedirs(eventpipe_directory)
# generate Cmake file
generateEventPipeCmakeFile(etwmanifest, eventpipe_directory)
# generate helper file
generateEventPipeHelperFile(etwmanifest, eventpipe_directory)
# generate all keywords
for keywordNode in tree.getElementsByTagName('keyword'):
keywordName = keywordNode.getAttribute('name')
keywordMask = keywordNode.getAttribute('mask')
keywordMap[keywordName] = int(keywordMask, 0)
# generate .cpp file for each provider
generateEventPipeImplFiles(
etwmanifest,
eventpipe_directory,
exclusionListFile)
import argparse
import sys
def main(argv):
# parse the command line
parser = argparse.ArgumentParser(
description="Generates the Code required to instrument eventpipe logging mechanism")
required = parser.add_argument_group('required arguments')
required.add_argument('--man', type=str, required=True,
                          help='full path to manifest containing the description of events')
required.add_argument('--intermediate', type=str, required=True,
help='full path to eventprovider intermediate directory')
required.add_argument('--exc', type=str, required=True,
help='full path to exclusion list')
args, unknown = parser.parse_known_args(argv)
if unknown:
print('Unknown argument(s): ', ', '.join(unknown))
return const.UnknownArguments
sClrEtwAllMan = args.man
intermediate = args.intermediate
exclusionListFile = args.exc
generateEventPipeFiles(sClrEtwAllMan, intermediate, exclusionListFile)
if __name__ == '__main__':
return_code = main(sys.argv[1:])
sys.exit(return_code)
|
chiamingyen/pygroup
|
refs/heads/master
|
wsgi/static/Brython2.2.0rc0-20140913-093500/Lib/unittest/result.py
|
727
|
"""Test result object"""
import io
import sys
import traceback
from . import util
from functools import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
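# Illustrative sketch of the decorator's effect (not part of the original
# module):
#
#     result = TestResult()
#     result.failfast = True
#     # any reporting method wrapped with @failfast (addError, addFailure,
#     # addUnexpectedSuccess) now calls result.stop() first, setting
#     # result.shouldStop = True so the runner aborts after the first failure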
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_testRunEntered = False
_moduleSetUpFailed = False
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def printErrors(self):
"Called by TestRunner after test run"
#fixme brython
pass
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
self._setupStdout()
def _setupStdout(self):
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = io.StringIO()
self._stdout_buffer = io.StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
self._restoreStdout()
self._mirrorOutput = False
def _restoreStdout(self):
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occurred."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeeded."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
#fixme brython: should check `'__unittest' in tb.tb_frame.f_globals`
#return '__unittest' in tb.tb_frame.f_globals
return True #for now, always report the level as relevant
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return ("<%s run=%i errors=%i failures=%i>" %
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures)))
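# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A minimal sketch of how a runner typically drives the result object defined
# above, assuming the enclosing class is unittest's TestResult; `test` is a
# hypothetical test callable with a `failureException` attribute.
#
#   import sys
#   result = TestResult()
#   result.startTestRun()
#   result.startTest(test)                 # bumps testsRun, redirects output if buffering
#   try:
#       test()
#       result.addSuccess(test)
#   except test.failureException:
#       result.addFailure(test, sys.exc_info())
#   except Exception:
#       result.addError(test, sys.exc_info())
#   finally:
#       result.stopTest(test)              # restores stdout/stderr
#   result.stopTestRun()
#   print(result.wasSuccessful(), repr(result))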
|
stxnext-kindergarten/presence-analizer-rkierzkowski
|
refs/heads/master
|
src/presence_analyzer/helpers.py
|
59
|
# -*- coding: utf-8 -*-
"""
Helper functions used in templates.
"""
|
jobiols/odoo-argentina
|
refs/heads/9.0
|
l10n_ar_account/report/__init__.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
# TODO arreglar e incorporar
from . import invoice_analysis
from . import account_ar_vat_line
|
chyla/pat-lms
|
refs/heads/master
|
web/slas-web/util/exception/__init__.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from command_exception import *
|
h3biomed/ansible
|
refs/heads/h3
|
lib/ansible/modules/source_control/gitlab_user.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gitlab_user
short_description: Creates/updates/deletes Gitlab Users
description:
- When the user does not exist in Gitlab, it will be created.
- When the user exists and state=absent, the user will be deleted.
- When changes are made to the user, the user will be updated.
version_added: "2.1"
author:
- Werner Dijkerman (@dj-wasabi)
- Guillaume Martinez (@Lunik)
requirements:
- python >= 2.7
- python-gitlab python module
- administrator rights on the Gitlab server
extends_documentation_fragment:
- auth_basic
options:
server_url:
description:
- The URL of the Gitlab server, with protocol (i.e. http or https).
required: true
type: str
login_user:
description:
- Gitlab user name.
type: str
login_password:
description:
- Gitlab password for login_user
type: str
api_token:
description:
- Gitlab token for logging in.
type: str
aliases:
- login_token
name:
description:
- Name of the user you want to create
required: true
type: str
username:
description:
- The username of the user.
required: true
type: str
password:
description:
- The password of the user.
- The GitLab server enforces a minimum password length of 8, so set this value to 8 or more characters.
required: true
type: str
email:
description:
- The email that belongs to the user.
required: true
type: str
sshkey_name:
description:
- The name of the sshkey
type: str
sshkey_file:
description:
- The ssh key itself.
type: str
group:
description:
- Id or full path of the parent group in the form of group/name.
- Add user as a member to this group.
type: str
access_level:
description:
- The access level to the group. One of the following can be used.
- guest
- reporter
- developer
- master (alias for maintainer)
- maintainer
- owner
default: guest
type: str
choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"]
state:
description:
- Create or delete the user.
- Possible values are present and absent.
default: present
type: str
choices: ["present", "absent"]
confirm:
description:
- Require confirmation.
type: bool
default: yes
version_added: "2.4"
isadmin:
description:
- Grant admin privileges to the user
type: bool
default: no
version_added: "2.8"
external:
description:
- Define external parameter for this user
type: bool
default: no
version_added: "2.8"
'''
EXAMPLES = '''
- name: "Delete Gitlab User"
gitlab_user:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
validate_certs: False
username: myusername
state: absent
delegate_to: localhost
- name: "Create Gitlab User"
gitlab_user:
api_url: https://gitlab.example.com/
validate_certs: True
api_username: dj-wasabi
api_password: "MySecretPassword"
name: My Name
username: myusername
password: mysecretpassword
email: me@example.com
sshkey_name: MySSH
sshkey_file: ssh-rsa AAAAB3NzaC1yc...
state: present
group: super_group/mon_group
access_level: owner
delegate_to: localhost
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Success"
result:
description: json parsed response from the server
returned: always
type: dict
error:
description: the error message returned by the Gitlab API
returned: failed
type: str
sample: "400: path is already in use"
user:
description: API object
returned: always
type: dict
'''
import os
import re
import traceback
GITLAB_IMP_ERR = None
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except Exception:
GITLAB_IMP_ERR = traceback.format_exc()
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.gitlab import findGroup
class GitLabUser(object):
def __init__(self, module, gitlab_instance):
self._module = module
self._gitlab = gitlab_instance
self.userObject = None
self.ACCESS_LEVEL = {
'guest': gitlab.GUEST_ACCESS,
'reporter': gitlab.REPORTER_ACCESS,
'developer': gitlab.DEVELOPER_ACCESS,
'master': gitlab.MAINTAINER_ACCESS,
'maintainer': gitlab.MAINTAINER_ACCESS,
'owner': gitlab.OWNER_ACCESS}
'''
@param username Username of the user
@param options User options
'''
def createOrUpdateUser(self, username, options):
changed = False
# Because we have already called existsUser() in main()
if self.userObject is None:
user = self.createUser({
'name': options['name'],
'username': username,
'password': options['password'],
'email': options['email'],
'skip_confirmation': not options['confirm'],
'admin': options['isadmin'],
'external': options['external']})
changed = True
else:
changed, user = self.updateUser(self.userObject, {
'name': options['name'],
'email': options['email'],
'is_admin': options['isadmin'],
'external': options['external']})
# Assign ssh keys
if options['sshkey_name'] and options['sshkey_file']:
changed = changed or self.addSshKeyToUser(user, {
'name': options['sshkey_name'],
'file': options['sshkey_file']})
# Assign group
if options['group_path']:
changed = changed or self.assignUserToGroup(user, options['group_path'], options['access_level'])
self.userObject = user
if changed:
if self._module.check_mode:
self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username)
try:
user.save()
except Exception as e:
self._module.fail_json(msg="Failed to update user: %s " % to_native(e))
return True
else:
return False
'''
@param user User object
'''
def getUserId(self, user):
if user is not None:
return user.id
return None
'''
@param user User object
@param sshkey_name Name of the ssh key
'''
def sshKeyExists(self, user, sshkey_name):
keyList = map(lambda k: k.title, user.keys.list())
return sshkey_name in keyList
'''
@param user User object
@param sshkey Dict containing sshkey infos {"name": "", "file": ""}
'''
def addSshKeyToUser(self, user, sshkey):
if not self.sshKeyExists(user, sshkey['name']):
if self._module.check_mode:
return True
try:
user.keys.create({
'title': sshkey['name'],
'key': sshkey['file']})
except gitlab.exceptions.GitlabCreateError as e:
self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e))
return True
return False
'''
@param group Group object
@param user_id Id of the user to find
'''
def findMember(self, group, user_id):
try:
member = group.members.get(user_id)
except gitlab.exceptions.GitlabGetError as e:
return None
return member
'''
@param group Group object
@param user_id Id of the user to check
'''
def memberExists(self, group, user_id):
member = self.findMember(group, user_id)
return member is not None
'''
@param group Group object
@param user_id Id of the user to check
@param access_level Gitlab access_level to check
'''
def memberAsGoodAccessLevel(self, group, user_id, access_level):
member = self.findMember(group, user_id)
return member.access_level == access_level
'''
@param user User object
@param group_identifier Id or complete path of the group, including any parent group path. <parent_path>/<group_path>
@param access_level Gitlab access_level to assign
'''
def assignUserToGroup(self, user, group_identifier, access_level):
group = findGroup(self._gitlab, group_identifier)
if self._module.check_mode:
return True
if group is None:
return False
if self.memberExists(group, self.getUserId(user)):
member = self.findMember(group, self.getUserId(user))
if not self.memberAsGoodAccessLevel(group, member.id, self.ACCESS_LEVEL[access_level]):
member.access_level = self.ACCESS_LEVEL[access_level]
member.save()
return True
else:
try:
group.members.create({
'user_id': self.getUserId(user),
'access_level': self.ACCESS_LEVEL[access_level]})
except gitlab.exceptions.GitlabCreateError as e:
self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e))
return True
return False
'''
@param user User object
@param arguments User attributes
'''
def updateUser(self, user, arguments):
changed = False
for arg_key, arg_value in arguments.items():
if arguments[arg_key] is not None:
if getattr(user, arg_key) != arguments[arg_key]:
setattr(user, arg_key, arguments[arg_key])
changed = True
return (changed, user)
'''
@param arguments User attributes
'''
def createUser(self, arguments):
if self._module.check_mode:
return True
try:
user = self._gitlab.users.create(arguments)
except (gitlab.exceptions.GitlabCreateError) as e:
self._module.fail_json(msg="Failed to create user: %s " % to_native(e))
return user
'''
@param username Username of the user
'''
def findUser(self, username):
users = self._gitlab.users.list(search=username)
for user in users:
if (user.username == username):
return user
'''
@param username Username of the user
'''
def existsUser(self, username):
# When the user exists, the object is stored in self.userObject.
user = self.findUser(username)
if user:
self.userObject = user
return True
return False
def deleteUser(self):
if self._module.check_mode:
return True
user = self.userObject
return user.delete()
def deprecation_warning(module):
deprecated_aliases = ['login_token']
module.deprecate("Aliases \'{aliases}\' are deprecated".format(aliases='\', \''.join(deprecated_aliases)), "2.10")
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
server_url=dict(type='str', required=True, removed_in_version="2.10"),
login_user=dict(type='str', no_log=True, removed_in_version="2.10"),
login_password=dict(type='str', no_log=True, removed_in_version="2.10"),
api_token=dict(type='str', no_log=True, aliases=["login_token"]),
name=dict(type='str', required=True),
state=dict(type='str', default="present", choices=["absent", "present"]),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True),
email=dict(type='str', required=True),
sshkey_name=dict(type='str'),
sshkey_file=dict(type='str'),
group=dict(type='str'),
access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]),
confirm=dict(type='bool', default=True),
isadmin=dict(type='bool', default=False),
external=dict(type='bool', default=False),
))
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['api_url', 'server_url'],
['api_username', 'login_user'],
['api_password', 'login_password'],
['api_username', 'api_token'],
['api_password', 'api_token'],
['login_user', 'login_token'],
['login_password', 'login_token']
],
required_together=[
['api_username', 'api_password'],
['login_user', 'login_password'],
],
required_one_of=[
['api_username', 'api_token', 'login_user', 'login_token']
],
supports_check_mode=True,
)
deprecation_warning(module)
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
api_url = module.params['api_url']
validate_certs = module.params['validate_certs']
api_user = module.params['api_username']
api_password = module.params['api_password']
gitlab_url = server_url if api_url is None else api_url
gitlab_user = login_user if api_user is None else api_user
gitlab_password = login_password if api_password is None else api_password
gitlab_token = module.params['api_token']
user_name = module.params['name']
state = module.params['state']
user_username = module.params['username'].lower()
user_password = module.params['password']
user_email = module.params['email']
user_sshkey_name = module.params['sshkey_name']
user_sshkey_file = module.params['sshkey_file']
group_path = module.params['group']
access_level = module.params['access_level']
confirm = module.params['confirm']
user_isadmin = module.params['isadmin']
user_external = module.params['external']
if not HAS_GITLAB_PACKAGE:
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
try:
gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
private_token=gitlab_token, api_version=4)
gitlab_instance.auth()
except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
module.fail_json(msg="Failed to connect to Gitlab server: %s" % to_native(e))
except (gitlab.exceptions.GitlabHttpError) as e:
module.fail_json(msg="Failed to connect to Gitlab server: %s. \
GitLab removed the Session API, and private tokens were removed from the user API endpoints in version 10.2." % to_native(e))
gitlab_user = GitLabUser(module, gitlab_instance)
user_exists = gitlab_user.existsUser(user_username)
if state == 'absent':
if user_exists:
gitlab_user.deleteUser()
module.exit_json(changed=True, msg="Successfully deleted user %s" % user_username)
else:
module.exit_json(changed=False, msg="User deleted or does not exists")
if state == 'present':
if gitlab_user.createOrUpdateUser(user_username, {
"name": user_name,
"password": user_password,
"email": user_email,
"sshkey_name": user_sshkey_name,
"sshkey_file": user_sshkey_file,
"group_path": group_path,
"access_level": access_level,
"confirm": confirm,
"isadmin": user_isadmin,
"external": user_external}):
module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.userObject._attrs)
else:
module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.userObject._attrs)
if __name__ == '__main__':
main()
|
mtarek/BeRTOS
|
refs/heads/master
|
wizard/const.py
|
8
|
#!/usr/bin/env python
# encoding: utf-8
#
# This file is part of BeRTOS.
#
# Bertos is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# As a special exception, you may use this file as part of a free software
# library without restriction. Specifically, if other files instantiate
# templates or use macros or inline functions from this file, or you compile
# this file and link it with other files to produce an executable, this
# file does not by itself cause the resulting executable to be covered by
# the GNU General Public License. This exception does not however
# invalidate any other reasons why the executable file might be covered by
# the GNU General Public License.
#
# Copyright 2008 Develer S.r.l. (http://www.develer.com/)
#
#
# Author: Lorenzo Berni <duplo@develer.com>
#
import os, sys
_tmp = sys.argv[0]
if os.path.islink(_tmp):
_tmp = os.readlink(_tmp)
DATA_DIR = os.path.dirname(os.path.abspath(_tmp))
del _tmp
PREDEFINED_BOARDS_DIR = 'boards'
PREDEFINED_BOARD_SPEC_FILE = '.spec'
PREDEFINED_BOARD_ICON_FILE = '.icon.png'
PREDEFINED_BOARD_IMAGE_FILE = '.image.png'
# Predefined icons in resources
PREDEFINED_BOARD_DEFAULT_DIR_ICON = ':images/default_dir_icon.png'
PREDEFINED_BOARD_DEFAULT_PROJECT_ICON = ':images/default_project_icon.png'
PREDEFINED_BOARD_DEFAULT_ICON = ':images/default_board_icon.png'
PREDEFINED_BOARD_DEFAULT_IMAGE = ':images/default_board_image.png'
# PREDEFINED_BOARD_SPEC_INFO = {
# 'name': <name of the board/directory>,
# 'description': <description of the board/directory>,
# }
CPU_DEF = {
"CPU_NAME": "",
"CPU_DIR": "",
"DEFINITION_PATH": "",
"TOOLCHAIN": "",
"CPU_TAGS": [],
"CPPA_SRC" : [],
"CXX_SRC": [],
"ASRC": [],
"C_SRC": [],
"PC_SRC" : [],
"CPU_DESC" : [],
"CPU_DEFAULT_FREQ": "1000000",
"GDB_INIT_SCRIPT": "",
}
TOOLCHAIN_ITEMS = ("ld", "as")
CPU_DEFINITION = "*.cdef"
GCC_NAME = "*gcc*"
MODULE_CONFIGURATION = "cfg_*.h"
UI_LOCATION = "ui"
EXTENSION_FILTER = (
".c",
".cpp",
".cxx",
".h",
".c++",
".ld",
".S",
".mk",
"Makefile",
)
IGNORE_LIST = (
".svn",
"CVS",
".git",
)
MODULE_DEFINITION = {
"module_name": "module_name",
"module_configuration": "module_configuration",
"module_depends": "module_depends",
"module_harvard": "module_harvard",
"module_hw": "module_hw",
"module_supports": "module_supports",
}
MK_PARAM_ID = "MK_"
|
motion2015/a3
|
refs/heads/a3
|
lms/djangoapps/mobile_api/social_facebook/test_utils.py
|
104
|
"""
Test utils for Facebook functionality
"""
import httpretty
import json
from rest_framework.test import APITestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from social.apps.django_app.default.models import UserSocialAuth
from student.models import CourseEnrollment
from student.views import login_oauth_token
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference, set_user_preference
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.factories import UserFactory
class SocialFacebookTestCase(ModuleStoreTestCase, APITestCase):
"""
Base Class for social test cases
"""
USERS = {
1: {'USERNAME': "TestUser One",
'EMAIL': "test_one@ebnotions.com",
'PASSWORD': "edx",
'FB_ID': "11111111111111111"},
2: {'USERNAME': "TestUser Two",
'EMAIL': "test_two@ebnotions.com",
'PASSWORD': "edx",
'FB_ID': "22222222222222222"},
3: {'USERNAME': "TestUser Three",
'EMAIL': "test_three@ebnotions.com",
'PASSWORD': "edx",
'FB_ID': "33333333333333333"}
}
BACKEND = "facebook"
USER_URL = "https://graph.facebook.com/me"
UID_FIELD = "id"
_FB_USER_ACCESS_TOKEN = 'ThisIsAFakeFacebookToken'
users = {}
def setUp(self):
super(SocialFacebookTestCase, self).setUp()
def set_facebook_interceptor_for_access_token(self):
"""
Facebook interceptor for groups access_token
"""
httpretty.register_uri(
httpretty.GET,
'https://graph.facebook.com/oauth/access_token?client_secret=' +
settings.FACEBOOK_APP_SECRET + '&grant_type=client_credentials&client_id=' +
settings.FACEBOOK_APP_ID,
body='FakeToken=FakeToken',
status=200
)
def set_facebook_interceptor_for_groups(self, data, status):
"""
Facebook interceptor for groups test
"""
httpretty.register_uri(
httpretty.POST,
'https://graph.facebook.com/' + settings.FACEBOOK_API_VERSION +
'/' + settings.FACEBOOK_APP_ID + '/groups',
body=json.dumps(data),
status=status
)
def set_facebook_interceptor_for_members(self, data, status, group_id, member_id):
"""
Facebook interceptor for group members tests
"""
httpretty.register_uri(
httpretty.POST,
'https://graph.facebook.com/' + settings.FACEBOOK_API_VERSION +
'/' + group_id + '/members?member=' + member_id +
'&access_token=FakeToken',
body=json.dumps(data),
status=status
)
def set_facebook_interceptor_for_friends(self, data):
"""
Facebook interceptor for friends tests
"""
httpretty.register_uri(
httpretty.GET,
"https://graph.facebook.com/v2.2/me/friends",
body=json.dumps(data),
status=201
)
def delete_group(self, group_id):
"""
Invoke the delete groups view
"""
url = reverse('create-delete-group', kwargs={'group_id': group_id})
response = self.client.delete(url)
return response
def invite_to_group(self, group_id, member_ids):
"""
Invoke the invite to group view
"""
url = reverse('add-remove-member', kwargs={'group_id': group_id, 'member_id': ''})
return self.client.post(url, {'member_ids': member_ids})
def remove_from_group(self, group_id, member_id):
"""
Invoke the remove from group view
"""
url = reverse('add-remove-member', kwargs={'group_id': group_id, 'member_id': member_id})
response = self.client.delete(url)
self.assertEqual(response.status_code, 200)
def link_edx_account_to_social(self, user, backend, social_uid):
"""
Register the user to the social auth backend
"""
reverse(login_oauth_token, kwargs={"backend": backend})
UserSocialAuth.objects.create(user=user, provider=backend, uid=social_uid)
def set_sharing_preferences(self, user, boolean_value):
"""
Sets self.user's share settings to boolean_value
"""
# Note that setting the value to boolean will result in the conversion to the unicode form of the boolean.
set_user_preference(user, 'share_with_facebook_friends', boolean_value)
self.assertEqual(get_user_preference(user, 'share_with_facebook_friends'), unicode(boolean_value))
def _change_enrollment(self, action, course_id=None, email_opt_in=None):
"""
Change the student's enrollment status in a course.
Args:
action (string): The action to perform (either "enroll" or "unenroll")
Keyword Args:
course_id (unicode): If provided, use this course ID. Otherwise, use the
course ID created in the setup for this test.
email_opt_in (unicode): If provided, pass this value along as
an additional GET parameter.
"""
if course_id is None:
course_id = unicode(self.course.id)
params = {
'enrollment_action': action,
'course_id': course_id
}
if email_opt_in:
params['email_opt_in'] = email_opt_in
return self.client.post(reverse('change_enrollment'), params)
def user_create_and_signin(self, user_number):
"""
Create a user and sign them in
"""
self.users[user_number] = UserFactory.create(
username=self.USERS[user_number]['USERNAME'],
email=self.USERS[user_number]['EMAIL'],
password=self.USERS[user_number]['PASSWORD']
)
self.client.login(username=self.USERS[user_number]['USERNAME'], password=self.USERS[user_number]['PASSWORD'])
def enroll_in_course(self, user, course):
"""
Enroll a user in the course
"""
resp = self._change_enrollment('enroll', course_id=course.id)
self.assertEqual(resp.status_code, 200)
self.assertTrue(CourseEnrollment.is_enrolled(user, course.id))
course_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course.id)
self.assertTrue(is_active)
self.assertEqual(course_mode, 'honor')
|
openstack/cliff
|
refs/heads/master
|
cliff/tests/test_formatters_csv.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import io
import unittest
from unittest import mock
from cliff.formatters import commaseparated
from cliff.tests import test_columns
class TestCSVFormatter(unittest.TestCase):
def test_commaseparated_list_formatter(self):
sf = commaseparated.CSVLister()
c = ('a', 'b', 'c')
d1 = ('A', 'B', 'C')
d2 = ('D', 'E', 'F')
data = [d1, d2]
expected = 'a,b,c\nA,B,C\nD,E,F\n'
output = io.StringIO()
parsed_args = mock.Mock()
parsed_args.quote_mode = 'none'
sf.emit_list(c, data, output, parsed_args)
actual = output.getvalue()
self.assertEqual(expected, actual)
def test_commaseparated_list_formatter_quoted(self):
sf = commaseparated.CSVLister()
c = ('a', 'b', 'c')
d1 = ('A', 'B', 'C')
d2 = ('D', 'E', 'F')
data = [d1, d2]
expected = '"a","b","c"\n"A","B","C"\n"D","E","F"\n'
output = io.StringIO()
# Parse arguments as if passed on the command-line
parser = argparse.ArgumentParser(description='Testing...')
sf.add_argument_group(parser)
parsed_args = parser.parse_args(['--quote', 'all'])
sf.emit_list(c, data, output, parsed_args)
actual = output.getvalue()
self.assertEqual(expected, actual)
def test_commaseparated_list_formatter_formattable_column(self):
sf = commaseparated.CSVLister()
c = ('a', 'b', 'c')
d1 = ('A', 'B', test_columns.FauxColumn(['the', 'value']))
data = [d1]
expected = 'a,b,c\nA,B,[\'the\'\\, \'value\']\n'
output = io.StringIO()
parsed_args = mock.Mock()
parsed_args.quote_mode = 'none'
sf.emit_list(c, data, output, parsed_args)
actual = output.getvalue()
self.assertEqual(expected, actual)
def test_commaseparated_list_formatter_unicode(self):
sf = commaseparated.CSVLister()
c = ('a', 'b', 'c')
d1 = ('A', 'B', 'C')
happy = '高兴'
d2 = ('D', 'E', happy)
data = [d1, d2]
expected = 'a,b,c\nA,B,C\nD,E,%s\n' % happy
output = io.StringIO()
parsed_args = mock.Mock()
parsed_args.quote_mode = 'none'
sf.emit_list(c, data, output, parsed_args)
actual = output.getvalue()
self.assertEqual(expected, actual)
|
dmitriyse/pythonnet
|
refs/heads/master
|
src/tests/test_module.py
|
2
|
# -*- coding: utf-8 -*-
"""Test CLR modules and the CLR import hook."""
import clr
import time
import types
import warnings
from fnmatch import fnmatch
import pytest
from ._compat import ClassType, PY2, PY3, range
from .utils import is_clr_class, is_clr_module, is_clr_root_module
# testImplicitAssemblyLoad() passes on deprecation warning; perfect! #
# clr.AddReference('System.Windows.Forms')
def test_import_hook_works():
"""Test that the import hook works correctly both using the
included runtime and an external runtime. This must be
the first test run in the unit tests!"""
from System import String
def test_import_clr():
import clr
assert is_clr_root_module(clr)
def test_version_clr():
import clr
assert clr.__version__ >= "2.2.0"
def test_preload_var():
import clr
assert clr.getPreload() is False, clr.getPreload()
clr.setPreload(False)
assert clr.getPreload() is False, clr.getPreload()
try:
clr.setPreload(True)
assert clr.getPreload() is True, clr.getPreload()
clr.setPreload(0)
assert clr.getPreload() is False, clr.getPreload()
clr.setPreload(1)
assert clr.getPreload() is True, clr.getPreload()
import System.Configuration
content = dir(System.Configuration)
assert len(content) > 10, content
finally:
clr.setPreload(False)
def test_module_interface():
"""Test the interface exposed by CLR module objects."""
import System
assert type(System.__dict__) == type({})
assert System.__name__ == 'System'
# the filename can be any module from the System namespace
# (eg System.Data.dll or System.dll, but also mscorlib.dll)
system_file = System.__file__
assert fnmatch(system_file, "*System*.dll") or fnmatch(system_file, "*mscorlib.dll"), \
"unexpected System.__file__: " + system_file
assert System.__doc__.startswith("Namespace containing types from the following assemblies:")
assert is_clr_class(System.String)
assert is_clr_class(System.Int32)
def test_simple_import():
"""Test simple import."""
import System
assert is_clr_module(System)
assert System.__name__ == 'System'
import sys
assert isinstance(sys, types.ModuleType)
assert sys.__name__ == 'sys'
if PY3:
import http.client as httplib
assert isinstance(httplib, types.ModuleType)
assert httplib.__name__ == 'http.client'
elif PY2:
import httplib
assert isinstance(httplib, types.ModuleType)
assert httplib.__name__ == 'httplib'
def test_simple_import_with_alias():
"""Test simple import with aliasing."""
import System as mySystem
assert is_clr_module(mySystem)
assert mySystem.__name__ == 'System'
import sys as mySys
assert isinstance(mySys, types.ModuleType)
assert mySys.__name__ == 'sys'
if PY3:
import http.client as myHttplib
assert isinstance(myHttplib, types.ModuleType)
assert myHttplib.__name__ == 'http.client'
elif PY2:
import httplib as myHttplib
assert isinstance(myHttplib, types.ModuleType)
assert myHttplib.__name__ == 'httplib'
def test_dotted_name_import():
"""Test dotted-name import."""
import System.Reflection
assert is_clr_module(System.Reflection)
assert System.Reflection.__name__ == 'System.Reflection'
import xml.dom
assert isinstance(xml.dom, types.ModuleType)
assert xml.dom.__name__ == 'xml.dom'
def test_multiple_dotted_name_import():
"""Test an import bug with multiple dotted imports."""
import System.Data
assert is_clr_module(System.Data)
assert System.Data.__name__ == 'System.Data'
import System.Data
assert is_clr_module(System.Data)
assert System.Data.__name__ == 'System.Data'
def test_dotted_name_import_with_alias():
"""Test dotted-name import with aliasing."""
import System.Reflection as SysRef
assert is_clr_module(SysRef)
assert SysRef.__name__ == 'System.Reflection'
import xml.dom as myDom
assert isinstance(myDom, types.ModuleType)
assert myDom.__name__ == 'xml.dom'
def test_simple_import_from():
"""Test simple 'import from'."""
from System import Reflection
assert is_clr_module(Reflection)
assert Reflection.__name__ == 'System.Reflection'
from xml import dom
assert isinstance(dom, types.ModuleType)
assert dom.__name__ == 'xml.dom'
def test_simple_import_from_with_alias():
"""Test simple 'import from' with aliasing."""
from System import Collections as Coll
assert is_clr_module(Coll)
assert Coll.__name__ == 'System.Collections'
from xml import dom as myDom
assert isinstance(myDom, types.ModuleType)
assert myDom.__name__ == 'xml.dom'
def test_dotted_name_import_from():
"""Test dotted-name 'import from'."""
from System.Collections import Specialized
assert is_clr_module(Specialized)
assert Specialized.__name__ == 'System.Collections.Specialized'
from System.Collections.Specialized import StringCollection
assert is_clr_class(StringCollection)
assert StringCollection.__name__ == 'StringCollection'
from xml.dom import pulldom
assert isinstance(pulldom, types.ModuleType)
assert pulldom.__name__ == 'xml.dom.pulldom'
from xml.dom.pulldom import PullDOM
assert isinstance(PullDOM, ClassType)
assert PullDOM.__name__ == 'PullDOM'
def test_dotted_name_import_from_with_alias():
"""Test dotted-name 'import from' with aliasing."""
from System.Collections import Specialized as Spec
assert is_clr_module(Spec)
assert Spec.__name__ == 'System.Collections.Specialized'
from System.Collections.Specialized import StringCollection as SC
assert is_clr_class(SC)
assert SC.__name__ == 'StringCollection'
from xml.dom import pulldom as myPulldom
assert isinstance(myPulldom, types.ModuleType)
assert myPulldom.__name__ == 'xml.dom.pulldom'
from xml.dom.pulldom import PullDOM as myPullDOM
assert isinstance(myPullDOM, ClassType)
assert myPullDOM.__name__ == 'PullDOM'
def test_from_module_import_star():
"""Test from module import * behavior."""
count = len(locals().keys())
m = __import__('System.Xml', globals(), locals(), ['*'])
assert m.__name__ == 'System.Xml'
assert is_clr_module(m)
assert len(locals().keys()) > count + 1
def test_implicit_assembly_load():
"""Test implicit assembly loading via import."""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# should trigger a DeprecationWarning as Microsoft.Build hasn't
# been added as a reference yet (and should exist for mono)
import Microsoft.Build
assert len(w) == 1
assert isinstance(w[0].message, DeprecationWarning)
with warnings.catch_warnings(record=True) as w:
clr.AddReference("System.Windows.Forms")
import System.Windows.Forms as Forms
assert is_clr_module(Forms)
assert Forms.__name__ == 'System.Windows.Forms'
from System.Windows.Forms import Form
assert is_clr_class(Form)
assert Form.__name__ == 'Form'
assert len(w) == 0
def test_explicit_assembly_load():
"""Test explicit assembly loading using standard CLR tools."""
from System.Reflection import Assembly
import System, sys
assembly = Assembly.LoadWithPartialName('System.Data')
assert assembly is not None
import System.Data
assert 'System.Data' in sys.modules
assembly = Assembly.LoadWithPartialName('SpamSpamSpamSpamEggsAndSpam')
assert assembly is None
def test_implicit_load_already_valid_namespace():
"""Test implicit assembly load over an already valid namespace."""
# In this case, the mscorlib assembly (loaded by default) defines
# a number of types in the System namespace. There is also a System
# assembly, which is _not_ loaded by default, which also contains
# types in the System namespace. The desired behavior is for the
# Python runtime to "do the right thing", allowing types from both
# assemblies to be found in the System module implicitly.
import System
assert is_clr_class(System.UriBuilder)
def test_import_non_existant_module():
"""Test import failure for a non-existent module."""
with pytest.raises(ImportError):
import System.SpamSpamSpam
def test_lookup_no_namespace_type():
"""Test lookup of types without a qualified namespace."""
import Python.Test
import clr
assert is_clr_class(clr.NoNamespaceType)
def test_module_lookup_recursion():
"""Test for recursive lookup handling."""
with pytest.raises(ImportError):
from System import System
with pytest.raises(AttributeError):
import System
_ = System.System
def test_module_get_attr():
"""Test module getattr behavior."""
import System
int_type = System.Int32
assert is_clr_class(int_type)
module = System.Xml
assert is_clr_module(module)
with pytest.raises(AttributeError):
_ = System.Spam
with pytest.raises(TypeError):
_ = getattr(System, 1)
def test_module_attr_abuse():
"""Test handling of attempts to set module attributes."""
# It would be safer to use a dict-proxy as the __dict__ for CLR
# modules, but as of Python 2.3 some parts of the CPython runtime
# like dir() will fail if a module dict is not a real dictionary.
def test():
import System
System.__dict__['foo'] = 0
return 1
assert test()
def test_module_type_abuse():
"""Test handling of attempts to break the module type."""
import System
mtype = type(System)
with pytest.raises(TypeError):
mtype.__getattribute__(0, 'spam')
with pytest.raises(TypeError):
mtype.__setattr__(0, 'spam', 1)
with pytest.raises(TypeError):
mtype.__repr__(0)
def test_clr_list_assemblies():
from clr import ListAssemblies
verbose = list(ListAssemblies(True))
short = list(ListAssemblies(False))
assert u'mscorlib' in short
assert u'System' in short
assert u'Culture=' in verbose[0]
assert u'Version=' in verbose[0]
def test_clr_add_reference():
from clr import AddReference
from System.IO import FileNotFoundException
for name in ("System", "Python.Runtime"):
assy = AddReference(name)
assy_name = assy.GetName().Name
assert assy_name == name
with pytest.raises(FileNotFoundException):
AddReference("somethingtotallysilly")
def test_clr_get_clr_type():
"""Test clr.GetClrType()."""
from clr import GetClrType
import System
from System import IComparable
from System import ArgumentException
assert GetClrType(System.String).FullName == "System.String"
comparable = GetClrType(IComparable)
assert comparable.FullName == "System.IComparable"
assert comparable.IsInterface
assert GetClrType(int).FullName == "System.Int32"
assert GetClrType(str).FullName == "System.String"
assert GetClrType(float).FullName == "System.Double"
dblarr = System.Array[System.Double]
assert GetClrType(dblarr).FullName == "System.Double[]"
with pytest.raises(TypeError):
GetClrType(1)
with pytest.raises(TypeError):
GetClrType("thiswillfail")
def test_assembly_load_thread_safety():
from Python.Test import ModuleTest
# spin up .NET thread which loads assemblies and triggers AppDomain.AssemblyLoad event
ModuleTest.RunThreads()
time.sleep(1e-5)
for _ in range(1, 100):
# call import clr, which in AssemblyManager.GetNames iterates through the loaded types
import clr
# import some .NET types
from System import DateTime
from System import Guid
from System.Collections.Generic import Dictionary
_ = Dictionary[Guid, DateTime]()
ModuleTest.JoinThreads()
|
WarrenWeckesser/scikits-image
|
refs/heads/master
|
skimage/future/setup.py
|
48
|
def configuration(parent_package='skimage', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('future', parent_package, top_path)
config.add_subpackage('graph')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
config = configuration(top_path='').todict()
setup(**config)
|
rolandovillca/python_basic_concepts
|
refs/heads/master
|
oo/create_object.py
|
4
|
# Create an Employee class
class Employee:
"""Common base class for all employees"""
emp_count = 0
# Create constructor of class.
def __init__(self, name, salary):
self.name = name
self.salary = salary
Employee.emp_count += 1
# Define a method
def display_count(self):
print "Total Employee %d" % (Employee.emp_count)
# Define another method
def display_employee(self):
print "Name: %s, Salary: %s" % (self.name, self.salary)
# Define another method
def print_attributes(self):
print 'Employee.__doc__: ', Employee.__doc__
print 'Employee.__name__: ', Employee.__name__
print 'Employee.__module__: ', Employee.__module__
print 'Employee.__bases__: ', Employee.__bases__
print 'Employee.__dict__: ', Employee.__dict__
# Create Instance Objects
emp1 = Employee('Zara', 2000)
emp2 = Employee('Manni', 5000)
# Accessing Attributes:
emp1.display_employee()
emp2.display_employee()
print 'Total Employee %d' % Employee.emp_count
print
emp1.print_attributes()
|
jniediek/mne-python
|
refs/heads/master
|
mne/datasets/spm_face/__init__.py
|
8
|
"""SPM face dataset
"""
from .spm_data import data_path, has_spm_data, get_version, requires_spm_data
|
Witia1/olympia
|
refs/heads/master
|
lib/misc/urlconf_decorator.py
|
53
|
"""
Apply a decorator to a whole urlconf instead of a single view function.
Usage::
>>> from urlconf_decorator import decorate
>>>
>>> def dec(f):
... def wrapper(*args, **kw):
... print 'inside the decorator'
... return f(*args, **kw)
... return wrapper
>>>
>>> urlpatterns = patterns('',
... url('^admin/', decorate(dec, include(admin.site.urls))),
... )
The decorator applied to the urlconf is a normal function decorator. It gets
wrapped around each callback in the urlconf as if you had @decorator above the
function.
"""
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
def decorate(decorator, urlconf):
if isinstance(urlconf, (list, tuple)):
for item in urlconf:
decorate(decorator, item)
elif isinstance(urlconf, RegexURLResolver):
for item in urlconf.url_patterns:
decorate(decorator, item)
elif isinstance(urlconf, RegexURLPattern):
urlconf._callback = decorator(urlconf.callback)
return urlconf
|
OCA/stock-logistics-warehouse
|
refs/heads/12.0
|
stock_orderpoint_mrp_link/tests/__init__.py
|
2
|
from . import test_stock_orderpoint_mrp_link
|
f-prettyland/angr
|
refs/heads/master
|
angr/procedures/stubs/Nop.py
|
8
|
import angr
######################################
# Doing nothing
######################################
class Nop(angr.SimProcedure):
def run(self):
pass
|
beezee/GAE-Django-site
|
refs/heads/master
|
django/contrib/gis/models.py
|
624
|
from django.db import connection
if (hasattr(connection.ops, 'spatial_version') and
not connection.ops.mysql):
# Getting the `SpatialRefSys` and `GeometryColumns`
# models for the default spatial backend. These
# aliases are provided for backwards-compatibility.
SpatialRefSys = connection.ops.spatial_ref_sys()
GeometryColumns = connection.ops.geometry_columns()
|
nwjs/chromium.src
|
refs/heads/nw45-log
|
tools/cr/cr/targets/content_shell.py
|
68
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module for the content_shell targets."""
import cr
class ContentShellTarget(cr.NamedTarget):
NAME = 'content_shell'
CONFIG = cr.Config.From(
CR_RUN_ARGUMENTS=cr.Config.Optional('-d "{CR_URL!e}"'),
CR_TARGET_NAME='ContentShell',
CR_PACKAGE='org.chromium.content_shell_apk',
CR_ACTIVITY='.ContentShellActivity',
)
class ContentShellTestTarget(cr.NamedTarget):
NAME = 'content_shell_test'
CONFIG = cr.Config.From(
CR_TARGET_NAME='ContentShellTest',
CR_TEST_TYPE=cr.Target.INSTRUMENTATION_TEST,
CR_RUN_DEPENDENCIES=[ContentShellTarget.NAME],
)
|
seize-the-dave/XlsxWriter
|
refs/heads/master
|
xlsxwriter/test/workbook/test_write_book_views.py
|
8
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...workbook import Workbook
class TestWriteBookViews(unittest.TestCase):
"""
Test the Workbook _write_book_views() method.
"""
def setUp(self):
self.fh = StringIO()
self.workbook = Workbook()
self.workbook._set_filehandle(self.fh)
def test_write_book_views(self):
"""Test the _write_book_views() method"""
self.workbook._write_book_views()
exp = """<bookViews><workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/></bookViews>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def tearDown(self):
self.workbook.fileclosed = 1
|
silly-wacky-3-town-toon/SOURCE-COD
|
refs/heads/master
|
Panda3D-1.10.0/python/Lib/test/subprocessdata/sigchild_ignore.py
|
243
|
import signal, subprocess, sys, time
# On Linux this causes os.waitpid to fail with OSError as the OS has already
# reaped our child process. The wait() passing the OSError on to the caller
# and causing us to exit with an error is what we are testing against.
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
subprocess.Popen([sys.executable, '-c', 'print("albatross")']).wait()
# Also ensure poll() handles an errno.ECHILD appropriately.
p = subprocess.Popen([sys.executable, '-c', 'print("albatross")'])
num_polls = 0
while p.poll() is None:
# Waiting for the process to finish.
time.sleep(0.01) # Avoid being a CPU busy loop.
num_polls += 1
if num_polls > 3000:
raise RuntimeError('poll should have returned 0 within 30 seconds')
|
olafdietsche/scrapy
|
refs/heads/master
|
scrapy/http/response/text.py
|
98
|
"""
This module implements the TextResponse class which adds encoding handling and
discovering (through HTTP headers) to base Response class.
See documentation in docs/topics/request-response.rst
"""
import six
from six.moves.urllib.parse import urljoin
from w3lib.encoding import html_to_unicode, resolve_encoding, \
html_body_declared_encoding, http_content_type_encoding
from scrapy.http.response import Response
from scrapy.utils.response import get_base_url
from scrapy.utils.python import memoizemethod_noargs, to_native_str
class TextResponse(Response):
_DEFAULT_ENCODING = 'ascii'
def __init__(self, *args, **kwargs):
self._encoding = kwargs.pop('encoding', None)
self._cached_benc = None
self._cached_ubody = None
self._cached_selector = None
super(TextResponse, self).__init__(*args, **kwargs)
def _set_url(self, url):
if isinstance(url, six.text_type):
if six.PY2 and self.encoding is None:
raise TypeError("Cannot convert unicode url - %s "
"has no encoding" % type(self).__name__)
self._url = to_native_str(url, self.encoding)
else:
super(TextResponse, self)._set_url(url)
def _set_body(self, body):
self._body = b'' # used by encoding detection
if isinstance(body, six.text_type):
if self._encoding is None:
raise TypeError('Cannot convert unicode body - %s has no encoding' %
type(self).__name__)
self._body = body.encode(self._encoding)
else:
super(TextResponse, self)._set_body(body)
def replace(self, *args, **kwargs):
kwargs.setdefault('encoding', self.encoding)
return Response.replace(self, *args, **kwargs)
@property
def encoding(self):
return self._declared_encoding() or self._body_inferred_encoding()
def _declared_encoding(self):
return self._encoding or self._headers_encoding() \
or self._body_declared_encoding()
def body_as_unicode(self):
"""Return body as unicode"""
# check for self.encoding before _cached_ubody just in
# case _body_inferred_encoding is called
benc = self.encoding
if self._cached_ubody is None:
charset = 'charset=%s' % benc
self._cached_ubody = html_to_unicode(charset, self.body)[1]
return self._cached_ubody
def urljoin(self, url):
"""Join this Response's url with a possible relative url to form an
absolute interpretation of the latter."""
return urljoin(get_base_url(self), url)
@memoizemethod_noargs
def _headers_encoding(self):
content_type = self.headers.get(b'Content-Type', b'')
return http_content_type_encoding(to_native_str(content_type))
def _body_inferred_encoding(self):
if self._cached_benc is None:
content_type = to_native_str(self.headers.get(b'Content-Type', b''))
benc, ubody = html_to_unicode(content_type, self.body,
auto_detect_fun=self._auto_detect_fun,
default_encoding=self._DEFAULT_ENCODING)
self._cached_benc = benc
self._cached_ubody = ubody
return self._cached_benc
def _auto_detect_fun(self, text):
for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):
try:
text.decode(enc)
except UnicodeError:
continue
return resolve_encoding(enc)
@memoizemethod_noargs
def _body_declared_encoding(self):
return html_body_declared_encoding(self.body)
@property
def selector(self):
from scrapy.selector import Selector
if self._cached_selector is None:
self._cached_selector = Selector(self)
return self._cached_selector
def xpath(self, query):
return self.selector.xpath(query)
def css(self, query):
return self.selector.css(query)
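# --- Illustrative sketch (editor's addition, not part of scrapy) -------------
# Encoding resolution as implemented above: the explicit `encoding` kwarg wins,
# then the Content-Type header, then a declaration in the body, then detection
# from the raw bytes. The URL and body below are hypothetical.
#
#   resp = TextResponse(
#       url='http://www.example.com/',
#       headers={'Content-Type': 'text/html; charset=utf-8'},
#       body=u'<html><body>hola</body></html>'.encode('utf-8'),
#   )
#   resp.encoding            # 'utf-8', taken from the header
#   resp.body_as_unicode()   # decoded text, cached after the first call
#   resp.urljoin('/other')   # 'http://www.example.com/other'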
|
cgstudiomap/cgstudiomap
|
refs/heads/develop
|
main/parts/odoo/addons/account/tests/test_account_move_closed_period.py
|
136
|
from datetime import date
from openerp.tests.common import TransactionCase
from openerp.osv.orm import except_orm
class TestPeriodState(TransactionCase):
"""
Forbid creation of Journal Entries for a closed period.
"""
def setUp(self):
super(TestPeriodState, self).setUp()
cr, uid = self.cr, self.uid
self.wizard_period_close = self.registry('account.period.close')
self.wizard_period_close_id = self.wizard_period_close.create(cr, uid, {'sure': 1})
_, self.sale_journal_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "sales_journal")
_, self.period_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "period_0")
def test_period_state(self):
cr, uid = self.cr, self.uid
self.wizard_period_close.data_save(cr, uid, [self.wizard_period_close_id], {
'lang': 'en_US',
'active_model': 'account.period',
'active_ids': [self.period_id],
'tz': False,
'active_id': self.period_id
})
with self.assertRaises(except_orm):
self.registry('account.move').create(cr, uid, {
'name': '/',
'period_id': self.period_id,
'journal_id': self.sale_journal_id,
'date': date.today(),
'line_id': [(0, 0, {
'name': 'foo',
'debit': 10,
}), (0, 0, {
'name': 'bar',
'credit': 10,
})]
})
|
2014c2g4/w16b_test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_dummy_thread.py
|
742
|
"""Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
# A dummy value
TIMEOUT_MAX = 2**31
# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of _thread.get_ident().
Since this module should only be used when _threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of _thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of _thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of _thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the _thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None, timeout=-1):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
if timeout > 0:
import time
time.sleep(timeout)
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
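# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The fallback pattern from the module docstring, plus basic use of the dummy
# primitives defined above; everything runs synchronously in this module.
#
#   try:
#       import _thread
#   except ImportError:
#       import _dummy_thread as _thread
#
#   lock = _thread.allocate_lock()
#   with lock:                                   # LockType supports the context protocol
#       pass
#   _thread.start_new_thread(print, ("runs in the calling thread",))
#   _thread.get_ident()                          # always -1 in the dummy module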
|
briancurtin/libcloud
|
refs/heads/trunk
|
docs/examples/compute/cloudsigma/create_server_with_vlan.py
|
63
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.CLOUDSIGMA)
driver = cls('username', 'password', region='zrh', api_version='2.0')
name = 'test node with vlan'
size = driver.list_sizes()[0]
image = driver.list_images()[0]
# 1. Create a VLAN. VLANs are created by purchasing a subscription.
subscription = driver.ex_create_subscription(amount=1, period='1 month',
resource='vlan', auto_renew=True)
vlan_uuid = subscription.subscribed_object
# 2. Create a node with a VLAN
node = driver.create_node(name=name, size=size, image=image,
ex_vlan=vlan_uuid)
print(node)
|
programadorjc/django
|
refs/heads/master
|
tests/utils_tests/test_dateparse.py
|
293
|
from __future__ import unicode_literals
import unittest
from datetime import date, datetime, time, timedelta
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.timezone import get_fixed_timezone
class DateParseTests(unittest.TestCase):
def test_parse_date(self):
# Valid inputs
self.assertEqual(parse_date('2012-04-23'), date(2012, 4, 23))
self.assertEqual(parse_date('2012-4-9'), date(2012, 4, 9))
# Invalid inputs
self.assertEqual(parse_date('20120423'), None)
self.assertRaises(ValueError, parse_date, '2012-04-56')
def test_parse_time(self):
# Valid inputs
self.assertEqual(parse_time('09:15:00'), time(9, 15))
self.assertEqual(parse_time('10:10'), time(10, 10))
self.assertEqual(parse_time('10:20:30.400'), time(10, 20, 30, 400000))
self.assertEqual(parse_time('4:8:16'), time(4, 8, 16))
# Invalid inputs
self.assertEqual(parse_time('091500'), None)
self.assertRaises(ValueError, parse_time, '09:15:90')
def test_parse_datetime(self):
# Valid inputs
self.assertEqual(parse_datetime('2012-04-23T09:15:00'),
datetime(2012, 4, 23, 9, 15))
self.assertEqual(parse_datetime('2012-4-9 4:8:16'),
datetime(2012, 4, 9, 4, 8, 16))
self.assertEqual(parse_datetime('2012-04-23T09:15:00Z'),
datetime(2012, 4, 23, 9, 15, 0, 0, get_fixed_timezone(0)))
self.assertEqual(parse_datetime('2012-4-9 4:8:16-0320'),
datetime(2012, 4, 9, 4, 8, 16, 0, get_fixed_timezone(-200)))
self.assertEqual(parse_datetime('2012-04-23T10:20:30.400+02:30'),
datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(150)))
self.assertEqual(parse_datetime('2012-04-23T10:20:30.400+02'),
datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(120)))
self.assertEqual(parse_datetime('2012-04-23T10:20:30.400-02'),
datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(-120)))
# Invalid inputs
self.assertEqual(parse_datetime('20120423091500'), None)
self.assertRaises(ValueError, parse_datetime, '2012-04-56T09:15:90')
class DurationParseTests(unittest.TestCase):
def test_parse_python_format(self):
timedeltas = [
timedelta(days=4, minutes=15, seconds=30, milliseconds=100), # fractions of seconds
timedelta(hours=10, minutes=15, seconds=30), # hours, minutes, seconds
timedelta(days=4, minutes=15, seconds=30), # multiple days
timedelta(days=1, minutes=00, seconds=00), # single day
timedelta(days=-4, minutes=15, seconds=30), # negative durations
timedelta(minutes=15, seconds=30), # minute & seconds
timedelta(seconds=30), # seconds
]
for delta in timedeltas:
self.assertEqual(parse_duration(format(delta)), delta)
def test_seconds(self):
self.assertEqual(parse_duration('30'), timedelta(seconds=30))
def test_minutes_seconds(self):
self.assertEqual(parse_duration('15:30'), timedelta(minutes=15, seconds=30))
self.assertEqual(parse_duration('5:30'), timedelta(minutes=5, seconds=30))
def test_hours_minutes_seconds(self):
self.assertEqual(parse_duration('10:15:30'), timedelta(hours=10, minutes=15, seconds=30))
self.assertEqual(parse_duration('1:15:30'), timedelta(hours=1, minutes=15, seconds=30))
self.assertEqual(parse_duration('100:200:300'), timedelta(hours=100, minutes=200, seconds=300))
def test_days(self):
self.assertEqual(parse_duration('4 15:30'), timedelta(days=4, minutes=15, seconds=30))
self.assertEqual(parse_duration('4 10:15:30'), timedelta(days=4, hours=10, minutes=15, seconds=30))
def test_fractions_of_seconds(self):
self.assertEqual(parse_duration('15:30.1'), timedelta(minutes=15, seconds=30, milliseconds=100))
self.assertEqual(parse_duration('15:30.01'), timedelta(minutes=15, seconds=30, milliseconds=10))
self.assertEqual(parse_duration('15:30.001'), timedelta(minutes=15, seconds=30, milliseconds=1))
self.assertEqual(parse_duration('15:30.0001'), timedelta(minutes=15, seconds=30, microseconds=100))
self.assertEqual(parse_duration('15:30.00001'), timedelta(minutes=15, seconds=30, microseconds=10))
self.assertEqual(parse_duration('15:30.000001'), timedelta(minutes=15, seconds=30, microseconds=1))
def test_negative(self):
self.assertEqual(parse_duration('-4 15:30'), timedelta(days=-4, minutes=15, seconds=30))
def test_iso_8601(self):
self.assertEqual(parse_duration('P4Y'), None)
self.assertEqual(parse_duration('P4M'), None)
self.assertEqual(parse_duration('P4W'), None)
self.assertEqual(parse_duration('P4D'), timedelta(days=4))
self.assertEqual(parse_duration('P0.5D'), timedelta(hours=12))
self.assertEqual(parse_duration('PT5H'), timedelta(hours=5))
self.assertEqual(parse_duration('PT5M'), timedelta(minutes=5))
self.assertEqual(parse_duration('PT5S'), timedelta(seconds=5))
self.assertEqual(parse_duration('PT0.000005S'), timedelta(microseconds=5))
|
TUD-OS/seoul
|
refs/heads/master
|
model/intel82576vf/reg_mmio.py
|
6
|
# -*- Mode: Python -*-
name = "MMIO"
rset = [
# Normal R/W register
{ 'name' : 'rVTCTRL', 'offset' : 0x0, 'initial' : 0,
'callback' : 'VTCTRL_cb' },
# Normal R/O register
{ 'name' : 'rSTATUS', 'offset' : 0x8, 'initial' : int('10000011',2), # 1GB/s, UP, FD
'read-only' : True },
# Free Running Timer
{ 'name' : 'rVTFRTIMER',
'offset' : 0x1048,
'read-only' : True,
'read-compute' : 'VTFRTIMER_compute' },
# RC/W1C
{ 'name' : 'rVTEICR',
'offset' : 0x1580,
'initial' : 0,
'w1c' : True,
'rc': 0xFFFFFFFF },
{ 'name' : 'rVTEICS',
'offset' : 0x1520,
'set' : 'rVTEICR',
'callback' : 'VTEICS_cb',
'w1s' : True, # Write 1s to set. 0s are ignored
'write-only' : True },
# Interrupt Mask
{ 'name' : 'rVTEIMS',
'important' : 100,
'offset' : 0x1524,
'initial' : 0,
'w1s' : True },
{ 'name' : 'rVTEIMC',
'offset' : 0x1528,
'write-only' : True,
'w1c' : True,
'set' : 'rVTEIMS' },
# Auto-Clear
{ 'name' : 'rVTEIAC',
'offset' : 0x152c,
'initial' : 0 },
# Auto-Mask
{ 'name' : 'rVTEIAM',
'offset' : 0x1530,
'initial' : 0 },
{ 'name' : 'rVMMB',
'offset' : 0xC40,
'rc' : 11<<4,
'mutable' : ~3, # These bits are handled in VMMB_cb
'initial' : 0x80, # RSTD (PF has completed reset)
'callback' : 'VMMB_cb',
},
{ 'name' : 'rVTIVAR', 'offset' : 0x1700, 'initial' : 0, 'mutable' : 0x83838383 },
{ 'name' : 'rVTIVAR_MISC', 'offset' : 0x1740, 'initial' : 0, 'mutable' : 0x83 },
]
# Interrupt moderation
for n in range(3):
rset.append({'name' : 'rVTEITR%d' % n,
'offset' : 0x1680 + 4*n,
'initial' : 0,
'callback' : 'VTEITR_cb'})
# Mailbox memory
for n in range(0x10):
rset.append({'name' : 'rVFMBX%d' % n,
'offset' : 0x800 + 4*n,
'initial' : 0})
# EOF
|
vongochung/buiquocviet
|
refs/heads/master
|
django/utils/tree.py
|
103
|
"""
A class for storing a tree graph. Primarily used for filter constructs in the
ORM.
"""
import copy
class Node(object):
"""
A single internal node in the tree graph. A Node should be viewed as a
connection (the root) with the children being either leaf nodes or other
Node instances.
"""
# Standard connector type. Clients usually won't use this at all and
# subclasses will usually override the value.
default = 'DEFAULT'
def __init__(self, children=None, connector=None, negated=False):
"""
Constructs a new Node. If no connector is given, the default will be
used.
Warning: You probably don't want to pass in the 'negated' parameter. It
is NOT the same as constructing a node and calling negate() on the
result.
"""
self.children = children and children[:] or []
self.connector = connector or self.default
self.subtree_parents = []
self.negated = negated
# We need this because of django.db.models.query_utils.Q. Q. __init__() is
# problematic, but it is a natural Node subclass in all other respects.
def _new_instance(cls, children=None, connector=None, negated=False):
"""
This is called to create a new instance of this class when we need new
Nodes (or subclasses) in the internal code in this class. Normally, it
just shadows __init__(). However, subclasses with an __init__ signature
that is not an extension of Node.__init__ might need to implement this
method to allow a Node to create a new instance of them (if they have
any extra setting up to do).
"""
obj = Node(children, connector, negated)
obj.__class__ = cls
return obj
_new_instance = classmethod(_new_instance)
def __str__(self):
if self.negated:
return '(NOT (%s: %s))' % (self.connector, ', '.join([str(c) for c
in self.children]))
return '(%s: %s)' % (self.connector, ', '.join([str(c) for c in
self.children]))
def __deepcopy__(self, memodict):
"""
Utility method used by copy.deepcopy().
"""
obj = Node(connector=self.connector, negated=self.negated)
obj.__class__ = self.__class__
obj.children = copy.deepcopy(self.children, memodict)
obj.subtree_parents = copy.deepcopy(self.subtree_parents, memodict)
return obj
def __len__(self):
"""
The size of a node is the number of children it has.
"""
return len(self.children)
def __nonzero__(self):
"""
For truth value testing.
"""
return bool(self.children)
def __contains__(self, other):
"""
Returns True if 'other' is a direct child of this instance.
"""
return other in self.children
def add(self, node, conn_type):
"""
Adds a new node to the tree. If the conn_type is the same as the root's
current connector type, the node is added to the first level.
Otherwise, the whole tree is pushed down one level and a new root
connector is created, connecting the existing tree and the new node.
"""
if node in self.children and conn_type == self.connector:
return
if len(self.children) < 2:
self.connector = conn_type
if self.connector == conn_type:
if isinstance(node, Node) and (node.connector == conn_type or
len(node) == 1):
self.children.extend(node.children)
else:
self.children.append(node)
else:
obj = self._new_instance(self.children, self.connector,
self.negated)
self.connector = conn_type
self.children = [obj, node]
def negate(self):
"""
Negate the sense of the root connector. This reorganises the children
so that the current node has a single child: a negated node containing
all the previous children. This slightly odd construction makes adding
new children behave more intuitively.
Interpreting the meaning of this negate is up to client code. This
method is useful for implementing "not" arrangements.
"""
self.children = [self._new_instance(self.children, self.connector,
not self.negated)]
self.connector = self.default
def start_subtree(self, conn_type):
"""
Sets up internal state so that new nodes are added to a subtree of the
current node. The conn_type specifies how the sub-tree is joined to the
existing children.
"""
if len(self.children) == 1:
self.connector = conn_type
elif self.connector != conn_type:
self.children = [self._new_instance(self.children, self.connector,
self.negated)]
self.connector = conn_type
self.negated = False
self.subtree_parents.append(self.__class__(self.children,
self.connector, self.negated))
self.connector = self.default
self.negated = False
self.children = []
def end_subtree(self):
"""
Closes off the most recently unmatched start_subtree() call.
This puts the current state into a node of the parent tree and returns
the current instance's state to be the parent.
"""
obj = self.subtree_parents.pop()
node = self.__class__(self.children, self.connector)
self.connector = obj.connector
self.negated = obj.negated
self.children = obj.children
self.children.append(node)
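# Editorial sketch (not part of the original module): a minimal illustration of
# how add() regroups children when the connector type changes, and how negate()
# wraps the existing children. Only the Node class defined above is used; the
# string children are placeholders.
if __name__ == '__main__':
    tree = Node()
    tree.add('a', 'AND')
    tree.add('b', 'AND')
    print(tree)          # (AND: a, b)
    tree.add('c', 'OR')  # connector differs, so the old tree is pushed down a level
    print(tree)          # (OR: (AND: a, b), c)
    tree.negate()
    print(tree)          # (DEFAULT: (NOT (OR: (AND: a, b), c)))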
|
kakunbsc/enigma2
|
refs/heads/master
|
lib/python/Plugins/SystemPlugins/SkinSelector/plugin.py
|
2
|
# -*- coding: iso-8859-1 -*-
# (c) 2006 Stephan Reichholf
# This Software is Free, use it where you want, when you want for whatever you want and modify it if you want but don't remove my copyright!
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Pixmap import Pixmap
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Plugins.Plugin import PluginDescriptor
from Components.config import config
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from os import path, walk
class SkinSelector(Screen):
# for i18n:
# _("Choose your Skin")
skinlist = []
root = "/usr/share/enigma2/"
def __init__(self, session, args = None):
Screen.__init__(self, session)
self.skinlist = []
self.previewPath = ""
path.walk(self.root, self.find, "")
self["key_red"] = StaticText(_("Close"))
self["introduction"] = StaticText(_("Press OK to activate the selected skin."))
self.skinlist.sort()
self["SkinList"] = MenuList(self.skinlist)
self["Preview"] = Pixmap()
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "EPGSelectActions"],
{
"ok": self.ok,
"back": self.close,
"red": self.close,
"up": self.up,
"down": self.down,
"left": self.left,
"right": self.right,
"info": self.info,
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
tmp = config.skin.primary_skin.value.find('/skin.xml')
if tmp != -1:
tmp = config.skin.primary_skin.value[:tmp]
idx = 0
for skin in self.skinlist:
if skin == tmp:
break
idx += 1
if idx < len(self.skinlist):
self["SkinList"].moveToIndex(idx)
self.loadPreview()
def up(self):
self["SkinList"].up()
self.loadPreview()
def down(self):
self["SkinList"].down()
self.loadPreview()
def left(self):
self["SkinList"].pageUp()
self.loadPreview()
def right(self):
self["SkinList"].pageDown()
self.loadPreview()
def info(self):
aboutbox = self.session.open(MessageBox,_("Enigma2 Skinselector\n\nIf you experience any problems please contact\nstephan@reichholf.net\n\n\xA9 2006 - Stephan Reichholf"), MessageBox.TYPE_INFO)
aboutbox.setTitle(_("About..."))
def find(self, arg, dirname, names):
for x in names:
if x == "skin.xml":
if dirname <> self.root:
subdir = dirname[19:]
self.skinlist.append(subdir)
else:
subdir = "Default Skin"
self.skinlist.append(subdir)
def ok(self):
if self["SkinList"].getCurrent() == "Default Skin":
skinfile = "skin.xml"
else:
skinfile = self["SkinList"].getCurrent()+"/skin.xml"
print "Skinselector: Selected Skin: "+self.root+skinfile
config.skin.primary_skin.value = skinfile
config.skin.primary_skin.save()
restartbox = self.session.openWithCallback(self.restartGUI,MessageBox,_("GUI needs a restart to apply a new skin\nDo you want to Restart the GUI now?"), MessageBox.TYPE_YESNO)
restartbox.setTitle(_("Restart GUI now?"))
def loadPreview(self):
if self["SkinList"].getCurrent() == "Default Skin":
pngpath = self.root+"/prev.png"
else:
pngpath = self.root+self["SkinList"].getCurrent()+"/prev.png"
if not path.exists(pngpath):
pngpath = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/SkinSelector/noprev.png")
if self.previewPath != pngpath:
self.previewPath = pngpath
self["Preview"].instance.setPixmapFromFile(self.previewPath)
def restartGUI(self, answer):
if answer is True:
self.session.open(TryQuitMainloop, 3)
def SkinSelMain(session, **kwargs):
session.open(SkinSelector)
def SkinSelSetup(menuid, **kwargs):
if menuid == "system":
return [(_("Skin"), SkinSelMain, "skin_selector", None)]
else:
return []
def Plugins(**kwargs):
return PluginDescriptor(name="Skinselector", description="Select Your Skin", where = PluginDescriptor.WHERE_MENU, fnc=SkinSelSetup)
|
mrquim/mrquimrepo
|
refs/heads/master
|
repo/plugin.video.salts/scrapers/rlsbb_scraper.py
|
5
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import datetime
import re
import urllib
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import XHR
from salts_lib.utils2 import i18n
import scraper
BASE_URL = 'http://rlsbb.ru'
SEARCH_BASE_URL = 'http://search.rlsbb.ru'
CATEGORIES = {VIDEO_TYPES.MOVIE: '/category/movies/', VIDEO_TYPES.EPISODE: '/category/tv-shows/'}
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
def __add_host(self, url, host):
parts = urlparse.urlparse(url)
return '%s://%s.%s' % (parts.scheme, host, parts.netloc)
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'ReleaseBB'
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
sources = {}
if not source_url or source_url == FORCE_NO_MATCH: return hosters
url = scraper_utils.urljoin(self.base_url, source_url)
html = self._http_get(url, require_debrid=False, cache_limit=.5)
if not html:
url = scraper_utils.urljoin(self.old_base_url, source_url)
html = self._http_get(url, require_debrid=False, cache_limit=.5)
sources.update(self.__get_post_links(html, video))
if kodi.get_setting('%s-include_comments' % (self.get_name())) == 'true':
for _attrs, comment in dom_parser2.parse_dom(html, 'div', {'id': re.compile('commentbody-\d+')}):
sources.update(self.__get_comment_links(comment, video))
for source in sources:
if scraper_utils.excluded_link(source): continue
host = urlparse.urlparse(source).hostname
hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': source, 'rating': None, 'quality': sources[source], 'direct': False}
hosters.append(hoster)
return hosters
def __get_comment_links(self, comment, video):
sources = {}
for attrs, _content in dom_parser2.parse_dom(comment, 'a', req='href'):
stream_url = attrs['href']
host = urlparse.urlparse(stream_url).hostname
quality = scraper_utils.blog_get_quality(video, stream_url, host)
sources[stream_url] = quality
return sources
def __get_post_links(self, html, video):
sources = {}
post = dom_parser2.parse_dom(html, 'div', {'class': 'postContent'})
if post:
post = post[0].content
results = re.findall('<p\s+style="text-align:\s*center;">(?:\s*<strong>)*(.*?)<br(.*?)</p>', post, re.DOTALL)
if not results:
match = re.search('>Release Name\s*:(.*?)<br', post, re.I)
release = match.group(1) if match else ''
match = re.search('>Download\s*:(.*?)</p>', post, re.DOTALL | re.I)
links = match.group(1) if match else ''
results = [(release, links)]
for result in results:
release, links = result
release = re.sub('</?[^>]*>', '', release)
for attrs, hostname in dom_parser2.parse_dom(links, 'a', req='href'):
stream_url = attrs['href']
if hostname.upper() in ['TORRENT SEARCH', 'VIP FILE']: continue
host = urlparse.urlparse(stream_url).hostname
quality = scraper_utils.blog_get_quality(video, release, host)
sources[stream_url] = quality
return sources
def get_url(self, video):
return self._blog_get_url(video)
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
settings = scraper_utils.disable_sub_check(settings)
name = cls.get_name()
settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" %s" default="60" visible="eq(-3,true)"/>' % (name, i18n('filter_results_days')))
settings.append(' <setting id="%s-select" type="enum" label=" %s" lvalues="30636|30637" default="0" visible="eq(-4,true)"/>' % (name, i18n('auto_select')))
settings.append(' <setting id="%s-include_comments" type="bool" label=" %s" default="false" visible="eq(-5,true)"/>' % (name, i18n('include_comments')))
return settings
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
referer = scraper_utils.urljoin(SEARCH_BASE_URL, '/search/')
headers = {'Referer': referer + urllib.quote_plus(title)}
headers.update(XHR)
search_url = scraper_utils.urljoin(SEARCH_BASE_URL, '/lib/search526049.php')
params = {'phrase': title, 'pindex': 1}
html = self._http_get(search_url, params=params, headers=headers, require_debrid=False, cache_limit=1)
js_data = scraper_utils.parse_json(html, search_url)
for post in js_data.get('results', []):
if self.__too_old(post): continue
result = self._blog_proc_results(post.get('post_title', ''), '(?P<post_title>.+)(?P<url>.*?)', '', video_type, title, year)
if result:
result[0]['url'] = scraper_utils.pathify_url(post['post_name'])
results.append(result[0])
return results
def __too_old(self, post):
filter_days = datetime.timedelta(days=int(kodi.get_setting('%s-filter' % (self.get_name()))))
post_date = post.get('post_date', '')
if filter_days and post_date:
today = datetime.date.today()
try:
post_date = scraper_utils.to_datetime(post_date, '%Y-%m-%d %H:%M:%S').date()
if today - post_date > filter_days:
return True
except ValueError:
return False
return False
|
uw-it-aca/spotseeker_server
|
refs/heads/master
|
spotseeker_server/views/buildings.py
|
1
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
""" Changes
=================================================================
sbutler1@illinois.edu: adapt to the new RESTDispatch framework;
remove needless use of regex.
"""
from spotseeker_server.views.rest_dispatch import RESTDispatch, JSONResponse
from spotseeker_server.require_auth import *
from spotseeker_server.models import Spot
from spotseeker_server.org_filters import SearchFilterChain
from spotseeker_server.views.search import SearchView
from django.http import HttpResponse
from django.core.exceptions import FieldError
class BuildingListView(RESTDispatch):
"""Performs actions on the list of buildings, at /api/v1/buildings.
GET returns 200 with a list of buildings.
"""
@app_auth_required
def GET(self, request):
chain = SearchFilterChain(request)
search_view = SearchView()
spots = SearchView.filter_on_request(
search_view, request.GET, chain, request.META, "buildings"
)
buildings = sorted(set([s.building_name for s in spots]))
return JSONResponse(buildings)
|
marcydoty/geraldo
|
refs/heads/master
|
site/newsite/django_1_0/tests/regressiontests/requests/models.py
|
281
|
# Need a models module for the test runner.
|
yoer/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/multiple_database/models.py
|
109
|
from __future__ import absolute_import
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Review(models.Model):
source = models.CharField(max_length=100)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
def __str__(self):
return self.source
class Meta:
ordering = ('source',)
class PersonManager(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
@python_2_unicode_compatible
class Person(models.Model):
objects = PersonManager()
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
# This book manager doesn't do anything interesting; it just
# exists to strip out the 'extra_arg' argument to certain
# calls. This argument is used to establish that the BookManager
# is actually getting used when it should be.
class BookManager(models.Manager):
def create(self, *args, **kwargs):
kwargs.pop('extra_arg', None)
return super(BookManager, self).create(*args, **kwargs)
def get_or_create(self, *args, **kwargs):
kwargs.pop('extra_arg', None)
return super(BookManager, self).get_or_create(*args, **kwargs)
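# Editorial note (not part of the original file): because create() and
# get_or_create() pop 'extra_arg' before delegating, a call such as
#   Book.objects.create(title="Test", published=some_date, extra_arg=True)
# behaves exactly like the same call without extra_arg; the surrounding tests
# use this to confirm that this custom manager is the one actually being used.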
@python_2_unicode_compatible
class Book(models.Model):
objects = BookManager()
title = models.CharField(max_length=100)
published = models.DateField()
authors = models.ManyToManyField(Person)
editor = models.ForeignKey(Person, null=True, related_name='edited')
reviews = generic.GenericRelation(Review)
pages = models.IntegerField(default=100)
def __str__(self):
return self.title
class Meta:
ordering = ('title',)
@python_2_unicode_compatible
class Pet(models.Model):
name = models.CharField(max_length=100)
owner = models.ForeignKey(Person)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
class UserProfile(models.Model):
user = models.OneToOneField(User, null=True)
flavor = models.CharField(max_length=100)
class Meta:
ordering = ('flavor',)
|
FHannes/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/messages/tests/middleware.py
|
556
|
from django import http
from django.contrib.messages.middleware import MessageMiddleware
from django.utils import unittest
class MiddlewareTest(unittest.TestCase):
def setUp(self):
self.middleware = MessageMiddleware()
def test_response_without_messages(self):
"""
Makes sure that the response middleware is tolerant of messages not
existing on request.
"""
request = http.HttpRequest()
response = http.HttpResponse()
self.middleware.process_response(request, response)
|
hwangsyin/cbrc-devteam-blog
|
refs/heads/master
|
lib/tornado/wsgi.py
|
18
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI support for the Tornado web framework.
WSGI is the Python standard for web servers, and allows for interoperability
between Tornado and other Python web frameworks and servers. This module
provides WSGI support in two ways:
* `WSGIApplication` is a version of `tornado.web.Application` that can run
inside a WSGI server. This is useful for running a Tornado app on another
HTTP server, such as Google App Engine. See the `WSGIApplication` class
documentation for limitations that apply.
* `WSGIContainer` lets you run other WSGI applications and frameworks on the
Tornado HTTP server. For example, with this class you can mix Django
and Tornado handlers in a single server.
"""
from __future__ import absolute_import, division, print_function, with_statement
import sys
import time
import tornado
from tornado import escape
from tornado import httputil
from tornado.log import access_log
from tornado import web
from tornado.escape import native_str, parse_qs_bytes
from tornado.util import bytes_type, unicode_type
try:
from io import BytesIO # python 3
except ImportError:
from cStringIO import StringIO as BytesIO # python 2
try:
import Cookie # py2
except ImportError:
import http.cookies as Cookie # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse
# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
# that are smuggled inside objects of type unicode (via the latin1 encoding).
# These functions are like those in the tornado.escape module, but defined
# here to minimize the temptation to use them in non-wsgi contexts.
if str is unicode_type:
def to_wsgi_str(s):
assert isinstance(s, bytes_type)
return s.decode('latin1')
def from_wsgi_str(s):
assert isinstance(s, str)
return s.encode('latin1')
else:
def to_wsgi_str(s):
assert isinstance(s, bytes_type)
return s
def from_wsgi_str(s):
assert isinstance(s, str)
return s
class WSGIApplication(web.Application):
"""A WSGI equivalent of `tornado.web.Application`.
`WSGIApplication` is very similar to `tornado.web.Application`,
except no asynchronous methods are supported (since WSGI does not
support non-blocking requests properly). If you call
``self.flush()`` or other asynchronous methods in your request
handlers running in a `WSGIApplication`, we throw an exception.
Example usage::
import tornado.web
import tornado.wsgi
import wsgiref.simple_server
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.wsgi.WSGIApplication([
(r"/", MainHandler),
])
server = wsgiref.simple_server.make_server('', 8888, application)
server.serve_forever()
See the `appengine demo
<https://github.com/facebook/tornado/tree/master/demos/appengine>`_
for an example of using this module to run a Tornado app on Google
App Engine.
WSGI applications use the same `.RequestHandler` class, but not
``@asynchronous`` methods or ``flush()``. This means that it is
not possible to use `.AsyncHTTPClient`, or the `tornado.auth` or
`tornado.websocket` modules.
"""
def __init__(self, handlers=None, default_host="", **settings):
web.Application.__init__(self, handlers, default_host, transforms=[],
wsgi=True, **settings)
def __call__(self, environ, start_response):
handler = web.Application.__call__(self, HTTPRequest(environ))
assert handler._finished
reason = handler._reason
status = str(handler._status_code) + " " + reason
headers = list(handler._headers.get_all())
if hasattr(handler, "_new_cookie"):
for cookie in handler._new_cookie.values():
headers.append(("Set-Cookie", cookie.OutputString(None)))
start_response(status,
[(native_str(k), native_str(v)) for (k, v) in headers])
return handler._write_buffer
class HTTPRequest(object):
"""Mimics `tornado.httpserver.HTTPRequest` for WSGI applications."""
def __init__(self, environ):
"""Parses the given WSGI environment to construct the request."""
self.method = environ["REQUEST_METHOD"]
self.path = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
self.path += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
self.uri = self.path
self.arguments = {}
self.query = environ.get("QUERY_STRING", "")
if self.query:
self.uri += "?" + self.query
self.arguments = parse_qs_bytes(native_str(self.query),
keep_blank_values=True)
self.version = "HTTP/1.1"
self.headers = httputil.HTTPHeaders()
if environ.get("CONTENT_TYPE"):
self.headers["Content-Type"] = environ["CONTENT_TYPE"]
if environ.get("CONTENT_LENGTH"):
self.headers["Content-Length"] = environ["CONTENT_LENGTH"]
for key in environ:
if key.startswith("HTTP_"):
self.headers[key[5:].replace("_", "-")] = environ[key]
if self.headers.get("Content-Length"):
self.body = environ["wsgi.input"].read(
int(self.headers["Content-Length"]))
else:
self.body = ""
self.protocol = environ["wsgi.url_scheme"]
self.remote_ip = environ.get("REMOTE_ADDR", "")
if environ.get("HTTP_HOST"):
self.host = environ["HTTP_HOST"]
else:
self.host = environ["SERVER_NAME"]
# Parse request body
self.files = {}
httputil.parse_body_arguments(self.headers.get("Content-Type", ""),
self.body, self.arguments, self.files)
self._start_time = time.time()
self._finish_time = None
def supports_http_1_1(self):
"""Returns True if this request supports HTTP/1.1 semantics"""
return self.version == "HTTP/1.1"
@property
def cookies(self):
"""A dictionary of Cookie.Morsel objects."""
if not hasattr(self, "_cookies"):
self._cookies = Cookie.SimpleCookie()
if "Cookie" in self.headers:
try:
self._cookies.load(
native_str(self.headers["Cookie"]))
except Exception:
self._cookies = None
return self._cookies
def full_url(self):
"""Reconstructs the full URL for this request."""
return self.protocol + "://" + self.host + self.uri
def request_time(self):
"""Returns the amount of time it took for this request to execute."""
if self._finish_time is None:
return time.time() - self._start_time
else:
return self._finish_time - self._start_time
class WSGIContainer(object):
r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
run it. For example::
def simple_app(environ, start_response):
status = "200 OK"
response_headers = [("Content-type", "text/plain")]
start_response(status, response_headers)
return ["Hello world!\n"]
container = tornado.wsgi.WSGIContainer(simple_app)
http_server = tornado.httpserver.HTTPServer(container)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
This class is intended to let other frameworks (Django, web.py, etc)
run on the Tornado HTTP server and I/O loop.
The `tornado.web.FallbackHandler` class is often useful for mixing
Tornado and WSGI apps in the same server. See
https://github.com/bdarnell/django-tornado-demo for a complete example.
"""
def __init__(self, wsgi_application):
self.wsgi_application = wsgi_application
def __call__(self, request):
data = {}
response = []
def start_response(status, response_headers, exc_info=None):
data["status"] = status
data["headers"] = response_headers
return response.append
app_response = self.wsgi_application(
WSGIContainer.environ(request), start_response)
response.extend(app_response)
body = b"".join(response)
if hasattr(app_response, "close"):
app_response.close()
if not data:
raise Exception("WSGI app did not call start_response")
status_code = int(data["status"].split()[0])
headers = data["headers"]
header_set = set(k.lower() for (k, v) in headers)
body = escape.utf8(body)
if status_code != 304:
if "content-length" not in header_set:
headers.append(("Content-Length", str(len(body))))
if "content-type" not in header_set:
headers.append(("Content-Type", "text/html; charset=UTF-8"))
if "server" not in header_set:
headers.append(("Server", "TornadoServer/%s" % tornado.version))
parts = [escape.utf8("HTTP/1.1 " + data["status"] + "\r\n")]
for key, value in headers:
parts.append(escape.utf8(key) + b": " + escape.utf8(value) + b"\r\n")
parts.append(b"\r\n")
parts.append(body)
request.write(b"".join(parts))
request.finish()
self._log(status_code, request)
@staticmethod
def environ(request):
"""Converts a `tornado.httpserver.HTTPRequest` to a WSGI environment.
"""
hostport = request.host.split(":")
if len(hostport) == 2:
host = hostport[0]
port = int(hostport[1])
else:
host = request.host
port = 443 if request.protocol == "https" else 80
environ = {
"REQUEST_METHOD": request.method,
"SCRIPT_NAME": "",
"PATH_INFO": to_wsgi_str(escape.url_unescape(
request.path, encoding=None, plus=False)),
"QUERY_STRING": request.query,
"REMOTE_ADDR": request.remote_ip,
"SERVER_NAME": host,
"SERVER_PORT": str(port),
"SERVER_PROTOCOL": request.version,
"wsgi.version": (1, 0),
"wsgi.url_scheme": request.protocol,
"wsgi.input": BytesIO(escape.utf8(request.body)),
"wsgi.errors": sys.stderr,
"wsgi.multithread": False,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
}
if "Content-Type" in request.headers:
environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
if "Content-Length" in request.headers:
environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
for key, value in request.headers.items():
environ["HTTP_" + key.replace("-", "_").upper()] = value
return environ
def _log(self, status_code, request):
if status_code < 400:
log_method = access_log.info
elif status_code < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * request.request_time()
summary = request.method + " " + request.uri + " (" + \
request.remote_ip + ")"
log_method("%d %s %.2fms", status_code, summary, request_time)
|
stenskjaer/scrapy
|
refs/heads/master
|
scrapy/linkextractors/__init__.py
|
18
|
"""
scrapy.linkextractors
This package contains a collection of Link Extractors.
For more info see docs/topics/link-extractors.rst
"""
import re
from six.moves.urllib.parse import urlparse
from parsel.csstranslator import HTMLTranslator
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.url import (
canonicalize_url, url_is_from_any_domain, url_has_any_extension,
)
# common file extensions that are not followed if they occur in links
IGNORED_EXTENSIONS = [
# images
'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',
'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',
# audio
'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',
# video
'3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',
'm4a',
# office suites
'xls', 'xlsx', 'ppt', 'pptx', 'doc', 'docx', 'odt', 'ods', 'odg', 'odp',
# other
'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',
]
_re_type = type(re.compile("", 0))
_matches = lambda url, regexs: any((r.search(url) for r in regexs))
_is_valid_url = lambda url: url.split('://', 1)[0] in set(['http', 'https', 'file'])
class FilteringLinkExtractor(object):
_csstranslator = HTMLTranslator()
def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,
restrict_xpaths, canonicalize, deny_extensions, restrict_css):
self.link_extractor = link_extractor
self.allow_res = [x if isinstance(x, _re_type) else re.compile(x) for x in arg_to_iter(allow)]
self.deny_res = [x if isinstance(x, _re_type) else re.compile(x) for x in arg_to_iter(deny)]
self.allow_domains = set(arg_to_iter(allow_domains))
self.deny_domains = set(arg_to_iter(deny_domains))
self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))
self.restrict_xpaths += tuple(map(self._csstranslator.css_to_xpath,
arg_to_iter(restrict_css)))
self.canonicalize = canonicalize
if deny_extensions is None:
deny_extensions = IGNORED_EXTENSIONS
self.deny_extensions = set(['.' + e for e in arg_to_iter(deny_extensions)])
def _link_allowed(self, link):
if not _is_valid_url(link.url):
return False
if self.allow_res and not _matches(link.url, self.allow_res):
return False
if self.deny_res and _matches(link.url, self.deny_res):
return False
parsed_url = urlparse(link.url)
if self.allow_domains and not url_is_from_any_domain(parsed_url, self.allow_domains):
return False
if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):
return False
if self.deny_extensions and url_has_any_extension(parsed_url, self.deny_extensions):
return False
return True
def matches(self, url):
if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):
return False
if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):
return False
allowed = [regex.search(url) for regex in self.allow_res] if self.allow_res else [True]
denied = [regex.search(url) for regex in self.deny_res] if self.deny_res else []
return any(allowed) and not any(denied)
def _process_links(self, links):
links = [x for x in links if self._link_allowed(x)]
if self.canonicalize:
for link in links:
link.url = canonicalize_url(urlparse(link.url))
links = self.link_extractor._process_links(links)
return links
def _extract_links(self, *args, **kwargs):
return self.link_extractor._extract_links(*args, **kwargs)
# Top-level imports
from .lxmlhtml import LxmlLinkExtractor as LinkExtractor
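# Editorial sketch (not part of the original module): typical spider-side use
# of the re-exported LinkExtractor, assuming the standard Scrapy response API.
# Inside a spider callback one might write:
#
#   from scrapy.linkextractors import LinkExtractor
#   extractor = LinkExtractor(allow=r'/category/', deny_extensions=['pdf'])
#   for link in extractor.extract_links(response):
#       yield scrapy.Request(link.url)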
|
pdonadeo/django-oscar
|
refs/heads/master
|
src/oscar/apps/dashboard/vouchers/app.py
|
49
|
from django.conf.urls import url
from oscar.core.application import Application
from oscar.core.loading import get_class
class VoucherDashboardApplication(Application):
name = None
default_permissions = ['is_staff', ]
list_view = get_class('dashboard.vouchers.views', 'VoucherListView')
create_view = get_class('dashboard.vouchers.views', 'VoucherCreateView')
update_view = get_class('dashboard.vouchers.views', 'VoucherUpdateView')
delete_view = get_class('dashboard.vouchers.views', 'VoucherDeleteView')
stats_view = get_class('dashboard.vouchers.views', 'VoucherStatsView')
def get_urls(self):
urls = [
url(r'^$', self.list_view.as_view(), name='voucher-list'),
url(r'^create/$', self.create_view.as_view(),
name='voucher-create'),
url(r'^update/(?P<pk>\d+)/$', self.update_view.as_view(),
name='voucher-update'),
url(r'^delete/(?P<pk>\d+)/$', self.delete_view.as_view(),
name='voucher-delete'),
url(r'^stats/(?P<pk>\d+)/$', self.stats_view.as_view(),
name='voucher-stats'),
]
return self.post_process_urls(urls)
application = VoucherDashboardApplication()
|
Eksmo/calibre
|
refs/heads/master
|
src/calibre/ebooks/mobi/langcodes.py
|
10
|
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
from struct import pack
from calibre.utils.localization import lang_as_iso639_1
lang_codes = {
}
main_language = {
0 : "NEUTRAL",
54 : "AFRIKAANS",
28 : "ALBANIAN",
1 : "ARABIC",
43 : "ARMENIAN",
77 : "ASSAMESE",
44 : "AZERI",
45 : "BASQUE",
35 : "BELARUSIAN",
69 : "BENGALI",
2 : "BULGARIAN",
3 : "CATALAN",
4 : "CHINESE",
26 : "CROATIAN",
5 : "CZECH",
6 : "DANISH",
19 : "DUTCH",
9 : "ENGLISH",
37 : "ESTONIAN",
56 : "FAEROESE",
41 : "FARSI",
11 : "FINNISH",
12 : "FRENCH",
55 : "GEORGIAN",
7 : "GERMAN",
8 : "GREEK",
71 : "GUJARATI",
13 : "HEBREW",
57 : "HINDI",
14 : "HUNGARIAN",
15 : "ICELANDIC",
33 : "INDONESIAN",
16 : "ITALIAN",
17 : "JAPANESE",
75 : "KANNADA",
63 : "KAZAK",
87 : "KONKANI",
18 : "KOREAN",
38 : "LATVIAN",
39 : "LITHUANIAN",
47 : "MACEDONIAN",
62 : "MALAY",
76 : "MALAYALAM",
58 : "MALTESE",
78 : "MARATHI",
97 : "NEPALI",
20 : "NORWEGIAN",
72 : "ORIYA",
21 : "POLISH",
22 : "PORTUGUESE",
70 : "PUNJABI",
23 : "RHAETOROMANIC",
24 : "ROMANIAN",
25 : "RUSSIAN",
59 : "SAMI",
79 : "SANSKRIT",
26 : "SERBIAN",
27 : "SLOVAK",
36 : "SLOVENIAN",
46 : "SORBIAN",
10 : "SPANISH",
48 : "SUTU",
65 : "SWAHILI",
29 : "SWEDISH",
73 : "TAMIL",
68 : "TATAR",
74 : "TELUGU",
30 : "THAI",
49 : "TSONGA",
50 : "TSWANA",
31 : "TURKISH",
34 : "UKRAINIAN",
32 : "URDU",
67 : "UZBEK",
42 : "VIETNAMESE",
52 : "XHOSA",
53 : "ZULU",
}
sub_language = {
0 : "NEUTRAL",
1 : "ARABIC_SAUDI_ARABIA",
2 : "ARABIC_IRAQ",
3 : "ARABIC_EGYPT",
4 : "ARABIC_LIBYA",
5 : "ARABIC_ALGERIA",
6 : "ARABIC_MOROCCO",
7 : "ARABIC_TUNISIA",
8 : "ARABIC_OMAN",
9 : "ARABIC_YEMEN",
10 : "ARABIC_SYRIA",
11 : "ARABIC_JORDAN",
12 : "ARABIC_LEBANON",
13 : "ARABIC_KUWAIT",
14 : "ARABIC_UAE",
15 : "ARABIC_BAHRAIN",
16 : "ARABIC_QATAR",
1 : "AZERI_LATIN",
2 : "AZERI_CYRILLIC",
1 : "CHINESE_TRADITIONAL",
2 : "CHINESE_SIMPLIFIED",
3 : "CHINESE_HONGKONG",
4 : "CHINESE_SINGAPORE",
1 : "DUTCH",
2 : "DUTCH_BELGIAN",
1 : "FRENCH",
2 : "FRENCH_BELGIAN",
3 : "FRENCH_CANADIAN",
4 : "FRENCH_SWISS",
5 : "FRENCH_LUXEMBOURG",
6 : "FRENCH_MONACO",
1 : "GERMAN",
2 : "GERMAN_SWISS",
3 : "GERMAN_AUSTRIAN",
4 : "GERMAN_LUXEMBOURG",
5 : "GERMAN_LIECHTENSTEIN",
1 : "ITALIAN",
2 : "ITALIAN_SWISS",
1 : "KOREAN",
1 : "LITHUANIAN",
1 : "MALAY_MALAYSIA",
2 : "MALAY_BRUNEI_DARUSSALAM",
1 : "NORWEGIAN_BOKMAL",
2 : "NORWEGIAN_NYNORSK",
2 : "PORTUGUESE",
1 : "PORTUGUESE_BRAZILIAN",
2 : "SERBIAN_LATIN",
3 : "SERBIAN_CYRILLIC",
1 : "SPANISH",
2 : "SPANISH_MEXICAN",
4 : "SPANISH_GUATEMALA",
5 : "SPANISH_COSTA_RICA",
6 : "SPANISH_PANAMA",
7 : "SPANISH_DOMINICAN_REPUBLIC",
8 : "SPANISH_VENEZUELA",
9 : "SPANISH_COLOMBIA",
10 : "SPANISH_PERU",
11 : "SPANISH_ARGENTINA",
12 : "SPANISH_ECUADOR",
13 : "SPANISH_CHILE",
14 : "SPANISH_URUGUAY",
15 : "SPANISH_PARAGUAY",
16 : "SPANISH_BOLIVIA",
17 : "SPANISH_EL_SALVADOR",
18 : "SPANISH_HONDURAS",
19 : "SPANISH_NICARAGUA",
20 : "SPANISH_PUERTO_RICO",
1 : "SWEDISH",
2 : "SWEDISH_FINLAND",
1 : "UZBEK_LATIN",
2 : "UZBEK_CYRILLIC",
}
IANA_MOBI = \
{None: {None: (0, 0)},
'af': {None: (54, 0)},
'ar': {None: (1, 0),
'AE': (1, 56),
'BH': (1, 60),
'DZ': (1, 20),
'EG': (1, 12),
'JO': (1, 44),
'KW': (1, 52),
'LB': (1, 48),
'MA': (1, 24),
'OM': (1, 32),
'QA': (1, 64),
'SA': (1, 4),
'SY': (1, 40),
'TN': (1, 28),
'YE': (1, 36)},
'as': {None: (77, 0)},
'az': {None: (44, 0)},
'be': {None: (35, 0)},
'bg': {None: (2, 0)},
'bn': {None: (69, 0)},
'ca': {None: (3, 0)},
'cs': {None: (5, 0)},
'da': {None: (6, 0)},
'de': {None: (7, 0),
'AT': (7, 12),
'CH': (7, 8),
'LI': (7, 20),
'LU': (7, 16)},
'el': {None: (8, 0)},
'en': {None: (9, 0),
'AU': (9, 12),
'BZ': (9, 40),
'CA': (9, 16),
'GB': (9, 8),
'IE': (9, 24),
'JM': (9, 32),
'NZ': (9, 20),
'PH': (9, 52),
'TT': (9, 44),
'US': (9, 4),
'ZA': (9, 28),
'ZW': (9, 48)},
'es': {None: (10, 0),
'AR': (10, 44),
'BO': (10, 64),
'CL': (10, 52),
'CO': (10, 36),
'CR': (10, 20),
'DO': (10, 28),
'EC': (10, 48),
'ES': (10, 4),
'GT': (10, 16),
'HN': (10, 72),
'MX': (10, 8),
'NI': (10, 76),
'PA': (10, 24),
'PE': (10, 40),
'PR': (10, 80),
'PY': (10, 60),
'SV': (10, 68),
'UY': (10, 56),
'VE': (10, 32)},
'et': {None: (37, 0)},
'eu': {None: (45, 0)},
'fa': {None: (41, 0)},
'fi': {None: (11, 0)},
'fo': {None: (56, 0)},
'fr': {None: (12, 0),
'BE': (12, 8),
'CA': (12, 12),
'CH': (12, 16),
'FR': (12, 4),
'LU': (12, 20),
'MC': (12, 24)},
'gu': {None: (71, 0)},
'he': {None: (13, 0)},
'hi': {None: (57, 0)},
'hr': {None: (26, 0)},
'hu': {None: (14, 0)},
'hy': {None: (43, 0)},
'id': {None: (33, 0)},
'is': {None: (15, 0)},
'it': {None: (16, 0),
'CH': (16, 8),
'IT': (16, 4)},
'ja': {None: (17, 0)},
'ka': {None: (55, 0)},
'kk': {None: (63, 0)},
'kn': {None: (75, 0)},
'ko': {None: (18, 0)},
'kok': {None: (87, 0)},
'lt': {None: (39, 0)},
'lv': {None: (38, 0)},
'mk': {None: (47, 0)},
'ml': {None: (76, 0)},
'mr': {None: (78, 0)},
'ms': {None: (62, 0)},
'mt': {None: (58, 0)},
'ne': {None: (97, 0)},
'nl': {None: (19, 0),
'BE': (19, 8)},
'no': {None: (20, 0)},
'or': {None: (72, 0)},
'pa': {None: (70, 0)},
'pl': {None: (21, 0)},
'pt': {None: (22, 0),
'BR': (22, 4),
'PT': (22, 8)},
'rm': {None: (23, 0)},
'ro': {None: (24, 0)},
'ru': {None: (25, 0)},
'sa': {None: (79, 0)},
'se': {None: (59, 0)},
'sk': {None: (27, 0)},
'sl': {None: (36, 0)},
'sq': {None: (28, 0)},
'sr': {None: (26, 12),
'RS': (26, 12)},
'st': {None: (48, 0)},
'sv': {None: (29, 0),
'FI': (29, 8)},
'sw': {None: (65, 0)},
'ta': {None: (73, 0)},
'te': {None: (74, 0)},
'th': {None: (30, 0)},
'tn': {None: (50, 0)},
'tr': {None: (31, 0)},
'ts': {None: (49, 0)},
'tt': {None: (68, 0)},
'uk': {None: (34, 0)},
'ur': {None: (32, 0)},
'uz': {None: (67, 0),
'UZ': (67, 8)},
'vi': {None: (42, 0)},
'wen': {None: (46, 0)},
'xh': {None: (52, 0)},
'zh': {None: (4, 0),
'CN': (4, 8),
'HK': (4, 12),
'SG': (4, 16),
'TW': (4, 4)},
'zu': {None: (53, 0)}}
def iana2mobi(icode):
langdict, subtags = IANA_MOBI[None], []
if icode:
subtags = list(icode.split('-'))
while len(subtags) > 0:
lang = subtags.pop(0).lower()
lang = lang_as_iso639_1(lang)
if lang and lang in IANA_MOBI:
langdict = IANA_MOBI[lang]
break
mcode = langdict[None]
while len(subtags) > 0:
subtag = subtags.pop(0)
if subtag not in langdict:
subtag = subtag.title()
if subtag not in langdict:
subtag = subtag.upper()
if subtag in langdict:
mcode = langdict[subtag]
break
return pack('>HBB', 0, mcode[1], mcode[0])
def mobi2iana(langcode, sublangcode):
prefix = suffix = None
for code, d in IANA_MOBI.items():
for subcode, t in d.items():
cc, cl = t
if cc == langcode:
prefix = code
if cl == sublangcode:
suffix = subcode.lower() if subcode else None
break
if prefix is not None:
break
if prefix is None:
return 'und'
if suffix is None:
return prefix
return prefix + '-' + suffix
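# Editorial sketch (not part of the original module): round-tripping a BCP 47
# language tag through the MOBI language encoding defined above.
if __name__ == '__main__':
    packed = iana2mobi('en-US')
    print(repr(packed))     # '\x00\x00\x04\t' -> sub-language 4, main language 9
    print(mobi2iana(9, 4))  # 'en-us'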
|
tiagocardosos/stoq
|
refs/heads/master
|
plugins/optical/medicssearch.py
|
2
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2014 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Search dialog/Editor for publishers """
import datetime
import decimal
from kiwi.currency import currency
from stoqlib.domain.sale import Sale
from stoqlib.enums import SearchFilterPosition
from stoqlib.lib.formatters import format_quantity
from stoqlib.domain.sale import SaleView
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.dialogs.saledetails import SaleDetailsDialog
from stoqlib.gui.search.personsearch import BasePersonSearch
from stoqlib.gui.search.searchcolumns import SearchColumn, IdentifierColumn
from stoqlib.gui.search.searchfilters import DateSearchFilter
from stoqlib.gui.search.searchdialog import SearchDialog
from stoqlib.lib.translation import stoqlib_gettext
from optical.opticaldomain import OpticalMedicView, MedicSoldItemsView
from optical.opticalslave import MedicEditor
_ = stoqlib_gettext
class OpticalMedicSearch(BasePersonSearch):
title = _('Medic Search')
editor_class = MedicEditor
search_spec = OpticalMedicView
size = (750, 450)
search_lbl_text = _('Medic matching:')
result_strings = _('medic'), _('medics')
#
# SearchDialog Hooks
#
def create_filters(self):
self.set_text_field_columns(['name', 'phone_number', 'crm_number'])
def get_columns(self):
return [SearchColumn('name', title=_('Medic Name'), sorted=True,
data_type=str, expand=True),
SearchColumn('phone_number', title=_('Phone Number'),
data_type=str),
SearchColumn('crm_number', title=_('UPID'),
data_type=str),
SearchColumn('partner', title=_('Partner'), data_type=bool,
visible=False)]
def get_editor_model(self, model):
return model.medic
class MedicSalesSearch(SearchDialog):
title = _(u'Sold Items by medic')
size = (800, 450)
search_spec = MedicSoldItemsView
fast_iter = True
#
# SearchDialog Hooks
#
def setup_widgets(self):
self.add_csv_button(_('Sold Products'), _('sold-products'))
self.sale_details_button = self.add_button(label=_('Sale Details'))
self.sale_details_button.show()
self.sale_details_button.set_sensitive(False)
def update_widgets(self):
item = self.results.get_selected()
self.sale_details_button.set_sensitive(bool(item))
def create_filters(self):
self.set_text_field_columns(['medic_name', 'description', 'code'])
# Don't set a limit here, otherwise it might break the summary
executer = self.search.get_query_executer()
executer.set_limit(-1)
branch_filter = self.create_branch_filter(_('In Branch:'))
self.add_filter(branch_filter, SearchFilterPosition.TOP,
columns=[Sale.branch_id])
self._date_filter = DateSearchFilter(_("Date:"))
self._date_filter.select(data=DateSearchFilter.Type.USER_INTERVAL)
self.add_filter(self._date_filter, SearchFilterPosition.BOTTOM,
columns=[Sale.confirm_date])
self.search.set_summary_label('total', label=_(u'Total:'),
format='<b>%s</b>')
def get_columns(self):
columns = [
IdentifierColumn('identifier', title=_('Sale #')),
SearchColumn('open_date', title=_('Open date'),
data_type=datetime.date, visible=False),
SearchColumn('confirm_date', title=_('Confirm date'),
data_type=datetime.date, visible=False),
SearchColumn('code', title=_('Code'), data_type=str, sorted=True),
SearchColumn('category', title=_('Category'), data_type=str, visible=False),
SearchColumn('branch_name', title=_('Branch'), data_type=str,
visible=False),
SearchColumn('description', title=_('Description'), data_type=str,
expand=True),
SearchColumn('manufacturer', title=_('Manufacturer'), data_type=str,
visible=False),
SearchColumn('medic_name', title=_('Medic'), data_type=str),
SearchColumn('crm_number', title=_('CRM'), data_type=str),
SearchColumn('partner', title=_('Partner'), data_type=bool,
visible=False, width=40),
SearchColumn('batch_number', title=_('Batch'), data_type=str,
visible=False),
SearchColumn('batch_date', title=_('Batch Date'),
data_type=datetime.date, visible=False),
SearchColumn('quantity', title=_('Qty'), data_type=decimal.Decimal,
format_func=format_quantity),
SearchColumn('total', title=_('Total'), data_type=currency),
]
return columns
def on_sale_details_button__clicked(self, widget):
item = self.results.get_selected()
sale_view = self.store.find(SaleView, id=item.sale_id).one()
run_dialog(SaleDetailsDialog, self, self.store, sale_view)
|
ChicGeek2/app-inventor-for-android
|
refs/heads/master
|
app_inv_game_server/tests/test_custom_modules/__init__.py
|
24
|
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
__authors__ = ['"Bill Magnuson" <billmag@mit.edu>']
|
coolkang/hsbsite
|
refs/heads/master
|
settings.py
|
1
|
from __future__ import absolute_import, unicode_literals
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for convenient
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", ("Media Library", "fb_browse"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, "Top navigation bar", "pages/menus/dropdown.html"),
# (2, "Left-hand tree", "pages/menus/tree.html"),
# (3, "Footer", "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# ("Image",),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# ("Another name",),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting.
USE_SOUTH = True
########################
# MAIN DJANGO SETTINGS #
########################
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'),
# ('Full Name', 'anotheremail@example.com'))
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost',]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = None
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
_ = lambda s: s
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ("127.0.0.1",)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
#########
# PATHS #
#########
import os
# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"moderna_app", # This is a template I am using.
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.pages",
"mezzanine.galleries",
#"mezzanine.twitter",
#"mezzanine.accounts",
#"mezzanine.mobile",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here, as they may change in the future
# since at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "SSH_USER": "", # SSH username for host deploying to
# "HOSTS": ALLOWED_HOSTS[:1], # List of hosts to deploy to (eg, first host)
# "DOMAINS": ALLOWED_HOSTS, # Domains for public site
# "REPO_URL": "ssh://hg@bitbucket.org/user/project", # Project's repo URL
# "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
# "PROJECT_NAME": "", # Unique identifier for project
# "REQUIREMENTS_PATH": "requirements.txt", # Project's pip requirements
# "GUNICORN_PORT": 8000, # Port gunicorn will listen on
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# "SECRET_KEY": SECRET_KEY,
# "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
####################
# HSBSITE SETTINGS #
####################
SITE_TITLE = 'hbanner'
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py, which should be
# ignored in your version control system, allowing settings to be
# defined per machine.
try:
from local_settings import *
except ImportError as e:
if "local_settings" not in str(e):
raise e
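# A minimal local_settings.py sketch (hypothetical values); because of the
# star import above, anything defined there overrides the settings in this
# module for the local machine only:
#
#     DEBUG = True
#     ALLOWED_HOSTS = ["localhost"]
#     SECRET_KEY = "replace-me"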
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
|
nkysg/Asenal
|
refs/heads/master
|
script/python/python-ad-0.9/lib/ad/core/exception.py
|
4
|
#
# This file is part of Python-AD. Python-AD is free software that is made
# available under the MIT license. Consult the file "LICENSE" that is
# distributed together with this file for the exact licensing terms.
#
# Python-AD is copyright (c) 2007 by the Python-AD authors. See the file
# "AUTHORS" for a complete overview.
import ldap
class Error(Exception):
pass
LDAPError = ldap.LDAPError
|
borgr/ucca
|
refs/heads/master
|
scripts/pickle_to_standard.py
|
1
|
#!/usr/bin/python3
import argparse
import os
import sys
from ucca.ioutil import file2passage, passage2file
desc = """Parses pickle files in UCCA standard format, and writes them in XML format.
"""
def main():
argparser = argparse.ArgumentParser(description=desc)
argparser.add_argument('filenames', nargs='+', help="pickle file names to convert")
argparser.add_argument('-o', '--outdir', default='.', help="output directory")
args = argparser.parse_args()
for filename in args.filenames:
sys.stderr.write("Reading passage '%s'...\n" % filename)
passage = file2passage(filename)
basename = os.path.splitext(os.path.basename(filename))[0]
outfile = os.path.join(args.outdir, basename + ".xml")
sys.stderr.write("Writing file '%s'...\n" % outfile)
passage2file(passage, outfile)
sys.exit(0)
if __name__ == '__main__':
main()
|
anryko/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/univention/udm_group.py
|
37
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright: (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: udm_group
version_added: "2.2"
author:
- Tobias Rüetschi (@keachi)
short_description: Manage posix groups
description:
- "This module allows to manage user groups on a univention corporate server (UCS).
It uses the python API of the UCS to create a new object or edit it."
requirements:
- Python >= 2.6
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the group is present or not.
name:
required: true
description:
- Name of the posix group.
description:
required: false
description:
- Group description.
position:
required: false
description:
- Define the whole LDAP position of the group, e.g.
C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
ou:
required: false
description:
- LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
subpath:
required: false
description:
- Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
'''
EXAMPLES = '''
# Create a POSIX group
- udm_group:
name: g123m-1A
# Create a POSIX group with the exact DN
# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
- udm_group:
name: g123m-1A
subpath: 'cn=classes,cn=students,cn=groups'
ou: school
# or
- udm_group:
name: g123m-1A
position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
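# Remove a POSIX group again (hypothetical name; the state option is documented above)
- udm_group:
name: g123m-1A
state: absent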
'''
RETURN = '''# '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
umc_module_for_add,
umc_module_for_edit,
ldap_search,
base_dn,
)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True,
type='str'),
description=dict(default=None,
type='str'),
position=dict(default='',
type='str'),
ou=dict(default='',
type='str'),
subpath=dict(default='cn=groups',
type='str'),
state=dict(default='present',
choices=['present', 'absent'],
type='str')
),
supports_check_mode=True
)
name = module.params['name']
description = module.params['description']
position = module.params['position']
ou = module.params['ou']
subpath = module.params['subpath']
state = module.params['state']
changed = False
diff = None
groups = list(ldap_search(
'(&(objectClass=posixGroup)(cn={0}))'.format(name),
attr=['cn']
))
if position != '':
container = position
else:
if ou != '':
ou = 'ou={0},'.format(ou)
if subpath != '':
subpath = '{0},'.format(subpath)
container = '{0}{1}{2}'.format(subpath, ou, base_dn())
group_dn = 'cn={0},{1}'.format(name, container)
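# Sketch of the resulting DN with hypothetical values: name=g123m-1A,
# subpath='cn=classes,cn=students,cn=groups', ou='school' and a base DN of
# dc=example,dc=com give
# group_dn = 'cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=example,dc=com'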
exists = bool(groups)
if state == 'present':
try:
if not exists:
grp = umc_module_for_add('groups/group', container)
else:
grp = umc_module_for_edit('groups/group', group_dn)
grp['name'] = name
grp['description'] = description
diff = grp.diff()
changed = grp.diff() != []
if not module.check_mode:
if not exists:
grp.create()
else:
grp.modify()
except Exception:
module.fail_json(
msg="Creating/editing group {0} in {1} failed".format(name, container)
)
if state == 'absent' and exists:
try:
grp = umc_module_for_edit('groups/group', group_dn)
if not module.check_mode:
grp.remove()
changed = True
except Exception:
module.fail_json(
msg="Removing group {0} failed".format(name)
)
module.exit_json(
changed=changed,
name=name,
diff=diff,
container=container
)
if __name__ == '__main__':
main()
|
GeorgeTG/plex-series-organizer
|
refs/heads/master
|
series_orginizer.py
|
1
|
#!/usr/bin/env python3
LICENSE = """
PlexSeriesOrganizer - A tool to organize your plex series, without moving files.
Copyright (C) 2015 George T. Gougoudis <george_gougoudis@hotmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
VERSION = 0.7
import argparse
import re
from os import listdir, symlink, makedirs, remove
from os.path import isfile, join, splitext, isdir, abspath, basename
from sys import argv,exit
MARGIN = 16
class Colors(object):
OKB= '\033[94m'
OKG = '\033[92m'
WRN = '\033[93m'
ERR = '\033[91m'
NRM = "\x1B[0m"
RED = "\x1B[31m"
GRN = "\x1B[32m"
YEL = "\x1B[33m"
BLU = "\x1B[34m"
MAG = "\x1B[35m"
CYN = "\x1B[36m"
WHT = "\x1B[37m"
BLD = '\033[1m'
ULN = '\033[4m'
RST = '\033[0m'
@staticmethod
def print_sep(color='GRN', length = 64):
Colors.print(color, '-' * (length + MARGIN))
@staticmethod
def print_header(text):
print(Colors.MAG + Colors.BLD + text + Colors.RST)
@staticmethod
def print_entry(entry):
print(Colors.RED + ']> ' + Colors.GRN + entry + Colors.RST)
@staticmethod
def print_list(list_p ):
for item in list_p:
Colors.print_entry(item)
@staticmethod
def print_dict(dict_p, fmt='{0} <-> {1}'):
for key, value in dict_p.items():
Colors.print_entry(fmt.format(key, value))
@staticmethod
def print(color, text):
if isinstance(color, str):
color = Colors.__dict__[color]
print(color + text + Colors.RST)
@staticmethod
def print_err(err):
print("{0}{1}{2}{3}".format(Colors.ERR, '[ERROR] ', err, Colors.RST))
@staticmethod
def print_wrn(warn):
print("{0}{1}{2}{3}".format(Colors.WRN, '[WARN] ', warn, Colors.RST))
class Bundle:
def __init__(self, **entries):
self.__dict__.update(entries)
def as_dict(self):
return self.__dict__
def dir_getfiles(directory):
return [ f for f in listdir(directory) if isfile(join(directory,f)) ]
def check_file(path):
if not isfile(path):
Colors.print_err('Not a file: {0}'.format(path))
return False
return True
def check_dir(path):
if not isdir(path):
Colors.print_err('Not a directory: {0}'.format(path))
return False
return True
def parse_args():
parser = argparse.ArgumentParser(
description='Fixes bad episode names by creating symlinks')
parser.add_argument('--source','-i',
type=str, metavar='[dir]',
required=True, dest='source_dir',
help="The source directory to scan for video files"
)
parser.add_argument('--match', '-m',
type=str, metavar='[pattern]',
dest='pattern', required=True,
help = """
The RegEx pattern to match episode number.
Must extract said number as a group.
"""
)
parser.add_argument('--episodes', '-e',
type=str, metavar='[file]',
dest='episodes_file', default=None,
help= """
The file that contains all episodes.
Should be in format number:name
"""
)
parser.add_argument('--season', '-s',
type=int, metavar='[number]',
required=True
)
parser.add_argument('--name', '-n',
type=str, metavar='[name]',
required=True,
help = "Series name"
)
parser.add_argument('--dest', '-o',
type=str, metavar='[dir]', dest='dest_dir',
default=None, help="Destination directory"
)
parser.add_argument('--force','-f',
action='store_true',
help = "Force overwrite of links. WARNING: Data loss posible"
)
parser.add_argument('--create-dirs','-c',
action='store_true', dest='create_dirs',
help = "Create 'SeriesName/SeasonXX/ dirs in output dir"
)
parser.add_argument('--version',
action='version', version='%(prog)s ' + str(VERSION)
)
return parser.parse_args()
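# Example invocation (hypothetical paths, pattern and episode list):
#   ./series_orginizer.py -i ~/Downloads/MyShow -m '.*_ep(\d+).*\.mkv' \
#       -n "My Show" -s 1 -e episodes.txt -o ~/plex/TV -c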
valid_ans = ['Y','y', 'N', 'n']
def prompt_yes_no(message):
ans = None
print('{0} [{1}]'.format(message, ', '.join(valid_ans)))
while True:
ans = str(input())
if ans not in valid_ans:
Colors.print_err("Valid answers: [{0}]".format(", ".join(valid_ans)))
else:
break
if ans.lower() == 'n':
return False
else:
return True
def scan_dir(directory, pattern):
"""
Scans 'directory' for matching files according to regex 'pattern'.
Returns: a dictionary with the following format:
"i" -> "whatever_whatever_[i]_name.mkv"
"""
regex = re.compile(pattern)
matched_files={}
#get all files from directory
files = dir_getfiles(directory)
for candidate in files:
match = regex.match(candidate)
if match:
if len(match.groups()) != 1:
Colors.print_wrn("File: {0} didn't match correctly".format(candinate))
continue
else:
#we have a match and a group to obtain ep. number specified
ep_num = match.group(1)
matched_files[ep_num] = candidate
return matched_files
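# Quick sketch with a hypothetical pattern and file name:
#   scan_dir('/srv/shows/MyShow', r'.*_ep(\d+).*\.mkv')
#   -> {'03': 'myshow_ep03_x264.mkv'} when that file is present in the directory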
def parse_episodes_file(filename):
"""
Parse given file to obtain episode names
Current format is EpNumber:EpName for each line
Returns: Dict:
EpNum->EpName
"""
episode_names = {}
try:
with open(filename, "r") as f:
#all lines in file, striped from whitespace
lines = [line.rstrip('\r\n') for line in f]
except Exception as ex:
print(str(ex))
exit(-1)
for i, line in enumerate(lines):
parts = line.split(':')
if len(parts) != 2:
Colors.print_wrn('Line[{0}]: Bad format'.format(i+1))
continue
episode_names[parts[0]] = parts[1]
return episode_names
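# Hypothetical episodes file contents, one "number:name" pair per line:
#   1:Pilot
#   2:The Long Way Home
# which parse_episodes_file() returns as {'1': 'Pilot', '2': 'The Long Way Home'}.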
def format_link(original_file, series_info, episode_info):
#ShowName - sXXeYY - Optional_Info.ext
args = series_info.as_dict().copy()
args['ep_num'] = episode_info.number
args['ep_name'] = episode_info.name
_, ext = splitext(original_file)
if args['ep_name']:
fmt_str= '{name} - s{season:0>2}e{ep_num:0>2} - {ep_name}{0}'
else:
fmt_str= '{name} - s{season:0>2}e{ep_num:0>2}{0}'
return fmt_str.format(ext, **args)
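# Sketch with hypothetical values:
#   format_link('myshow_ep03_x264.mkv',
#               Bundle(name='My Show', season=1),
#               Bundle(number='3', name='Pilot'))
#   -> 'My Show - s01e03 - Pilot.mkv'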
def prepare_links(files, series_info, episode_names=None):
links_map = {}
episode_info = Bundle()
for ep_num, candidate in files.original.items():
original_file = abspath(join(files.source_dir, candidate))
if not files.dest_dir:
files.dest_dir = files.source_dir
episode_info.name = None #default
if episode_names:
if ep_num in episode_names:
episode_info.name = episode_names[ep_num]
else:
Colors.print_wrn('Episode[{0}] not found in episodes file'.\
format(ep_num))
episode_info.number = ep_num
ep_filename = format_link(original_file, series_info, episode_info)
links_map[original_file] = abspath(join(files.dest_dir, ep_filename))
return links_map
def fix_with_file(files, series_info, options):
Colors.print('CYN', 'Source directory: ' + files.source_dir)
Colors.print_dict(files.original, 'Matched episode [{1}] number: [{0}]')
if options.create_dirs:
files.dest_dir = join(
files.dest_dir,
series_info.name,
'Season {0:0>2}'.format(series_info.season)
)
if files.episodes_file and check_file(files.episodes_file):
episode_names = parse_episodes_file(files.episodes_file)
if len(episode_names) > 0:
Colors.print_header('\nFound in episodes file:')
Colors.print_dict(episode_names)
if len(files.original) != len(episode_names):
Colors.print_wrn(
"Number of episodes mathced differs from definitions in file")
else:
episode_names = None
else:
episode_names = None
links_map = prepare_links(files, series_info, episode_names)
Colors.print('CYN', '\nDestination directory: ' + files.dest_dir)
Colors.print_header('These links will be created:')
Colors.print_list(basename(f) for f in links_map.values())
if not prompt_yes_no("Continue?"):
print('Aborting...')
exit(0)
if not isdir(files.dest_dir):
makedirs(abspath(files.dest_dir))
for source, dest in links_map.items():
#Force is specified, delete dest file
if options.force and isfile(dest):
print('[-f] Deleting existing file: {0}'.format(basename(dest)))
remove(dest)
try:
symlink(source, dest)
except Exception as ex:
print(str(ex))
Colors.print_header('Done!')
def main():
args = parse_args()
if not check_dir(args.source_dir):
exit(-1)
series_info = Bundle(**{
'name': args.name,
'season': args.season,
})
files_info = Bundle(**{
'source_dir': args.source_dir,
'dest_dir': args.dest_dir if args.dest_dir else args.source_dir,
'episodes_file': args.episodes_file,
'original': scan_dir(args.source_dir, args.pattern)
})
options = Bundle(**{
'force': args.force,
'create_dirs': args.create_dirs
})
fix_with_file(files_info, series_info, options)
if __name__=='__main__':
main()
|