repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
pschmitt/home-assistant | refs/heads/dev | tests/components/august/test_gateway.py | 6 | """The gateway tests for the august platform."""
from homeassistant.components.august.const import DOMAIN
from homeassistant.components.august.gateway import AugustGateway
from tests.async_mock import MagicMock, patch
from tests.components.august.mocks import _mock_august_authentication, _mock_get_config
async def test_refresh_access_token(hass):
    """Test token refreshes."""
    # All of the mock wiring and assertions live in the patched helper below.
    await _patched_refresh_access_token(hass, "new_token", 5678)
@patch("homeassistant.components.august.gateway.AuthenticatorAsync.async_authenticate")
@patch("homeassistant.components.august.gateway.AuthenticatorAsync.should_refresh")
@patch(
    "homeassistant.components.august.gateway.AuthenticatorAsync.async_refresh_access_token"
)
async def _patched_refresh_access_token(
    hass,
    new_token,
    new_token_expire_time,
    refresh_access_token_mock,
    should_refresh_mock,
    authenticate_mock,
):
    """Drive an AugustGateway through a full token-refresh cycle.

    The mock parameters are injected by the @patch decorators bottom-up:
    the innermost patch (async_refresh_access_token) maps to the first
    mock argument, and the outermost (async_authenticate) to the last.
    """
    # Initial authentication returns a token that should later be replaced.
    authenticate_mock.side_effect = MagicMock(
        return_value=_mock_august_authentication("original_token", 1234)
    )
    august_gateway = AugustGateway(hass)
    mocked_config = _mock_get_config()
    await august_gateway.async_setup(mocked_config[DOMAIN])
    await august_gateway.async_authenticate()

    # When should_refresh reports False, no refresh request may be issued.
    should_refresh_mock.return_value = False
    await august_gateway.async_refresh_access_token_if_needed()
    refresh_access_token_mock.assert_not_called()

    # When should_refresh reports True, the gateway must fetch the new
    # authentication and expose the refreshed token and expiry.
    should_refresh_mock.return_value = True
    refresh_access_token_mock.return_value = _mock_august_authentication(
        new_token, new_token_expire_time
    )
    await august_gateway.async_refresh_access_token_if_needed()
    refresh_access_token_mock.assert_called()
    assert august_gateway.access_token == new_token
    assert august_gateway.authentication.access_token_expires == new_token_expire_time
|
chinmaygarde/mojo | refs/heads/ios | third_party/cython/src/Cython/Compiler/Scanning.py | 90 | # cython: infer_types=True, language_level=3, py2_import=True
#
# Cython Scanner
#
import os
import platform
import cython
cython.declare(EncodedString=object, any_string_prefix=unicode, IDENT=unicode,
print_function=object)
from Cython import Utils
from Cython.Plex.Scanners import Scanner
from Cython.Plex.Errors import UnrecognizedInput
from Errors import error
from Lexicon import any_string_prefix, make_lexicon, IDENT
from Future import print_function
from StringEncoding import EncodedString
# Module-level debugging switches for the scanner; all disabled by default.
debug_scanner = 0
trace_scanner = 0
scanner_debug_flags = 0
scanner_dump_file = None

# Shared Plex lexicon instance, built lazily by get_lexicon().
lexicon = None
def get_lexicon():
    """Return the module-wide scanner lexicon, constructing it lazily."""
    global lexicon
    if lexicon:
        return lexicon
    lexicon = make_lexicon()
    return lexicon
#------------------------------------------------------------------
# Words reserved in Python source (recognised in both .py and .pyx modes).
# The original list contained "in" and "nonlocal" twice; duplicates were
# harmless (the scanner builds a set from this) but misleading.
py_reserved_words = [
    "global", "nonlocal", "def", "class", "print", "del", "pass", "break",
    "continue", "return", "raise", "import", "exec", "try",
    "except", "finally", "while", "if", "elif", "else", "for",
    "in", "assert", "and", "or", "not", "is", "lambda",
    "from", "yield", "with",
]

# Additional words reserved only in Cython (.pyx/.pxd) source.
pyx_reserved_words = py_reserved_words + [
    "include", "ctypedef", "cdef", "cpdef",
    "cimport", "DEF", "IF", "ELIF", "ELSE"
]
class Method(object):
    """Callable that dispatches to a named method of the object it is given.

    Used as a Plex scanner action: invoking the instance with
    (stream, text) calls stream.<name>(text).
    """

    def __init__(self, name):
        self.name = name
        # Plex tracing prints __name__, so mirror the method name there.
        self.__name__ = name

    def __call__(self, stream, text):
        bound = getattr(stream, self.name)
        return bound(text)
#------------------------------------------------------------------
class CompileTimeScope(object):
    """Nested name -> value mapping used to evaluate DEF/IF directives."""

    def __init__(self, outer=None):
        self.entries = {}
        self.outer = outer

    def declare(self, name, value):
        """Bind *name* to *value* in this scope."""
        self.entries[name] = value

    def update(self, other):
        """Merge the mapping *other* into this scope's entries."""
        self.entries.update(other)

    def lookup_here(self, name):
        """Look *name* up in this scope only; raises KeyError if absent."""
        return self.entries[name]

    def __contains__(self, name):
        return name in self.entries

    def lookup(self, name):
        """Look *name* up along the scope chain; raises KeyError if absent."""
        try:
            return self.lookup_here(name)
        except KeyError:
            if self.outer is None:
                raise
            return self.outer.lookup(name)
def initial_compile_time_env():
    """Create the root environment used to evaluate DEF/IF directives.

    The returned scope is empty but chains to a base scope exposing the
    platform's uname() fields (UNAME_*) and a whitelist of Python builtins.
    """
    benv = CompileTimeScope()
    names = ('UNAME_SYSNAME', 'UNAME_NODENAME', 'UNAME_RELEASE',
             'UNAME_VERSION', 'UNAME_MACHINE')
    for name, value in zip(names, platform.uname()):
        benv.declare(name, value)
    try:
        import __builtin__ as builtins
    except ImportError:
        # Python 3 renamed the module to 'builtins'.
        import builtins
    names = ('False', 'True',
             'abs', 'all', 'any', 'ascii', 'bin', 'bool', 'bytearray', 'bytes',
             'chr', 'cmp', 'complex', 'dict', 'divmod', 'enumerate', 'filter',
             'float', 'format', 'frozenset', 'hash', 'hex', 'int', 'len',
             'list', 'long', 'map', 'max', 'min', 'oct', 'ord', 'pow', 'range',
             'repr', 'reversed', 'round', 'set', 'slice', 'sorted', 'str',
             'sum', 'tuple', 'xrange', 'zip')
    for name in names:
        try:
            benv.declare(name, getattr(builtins, name))
        except AttributeError:
            # ignore, likely Py3 (e.g. 'cmp', 'long', 'xrange' do not exist)
            pass
    # User DEF values live in a child scope so they can shadow the builtins.
    denv = CompileTimeScope(benv)
    return denv
#------------------------------------------------------------------
class SourceDescriptor(object):
    """
    A SourceDescriptor should be considered immutable.
    """
    # Default file type; overwritten by set_file_type_from_name().
    _file_type = 'pyx'

    _escaped_description = None
    _cmp_name = ''

    def __str__(self):
        # Descriptors must never be used directly as file names.
        assert False

    def set_file_type_from_name(self, filename):
        """Derive the file type from *filename*'s extension (default 'pyx')."""
        _, ext = os.path.splitext(filename)
        if ext in ('.pyx', '.pxd', '.py'):
            self._file_type = ext[1:]
        else:
            self._file_type = 'pyx'

    def is_cython_file(self):
        return self._file_type in ('pyx', 'pxd')

    def is_python_file(self):
        return self._file_type == 'py'

    def get_escaped_description(self):
        """Return the description forced into plain ASCII, caching it."""
        escaped = self._escaped_description
        if escaped is None:
            description = self.get_description()
            escaped = description.encode('ASCII', 'replace').decode("ASCII")
            self._escaped_description = escaped
        return escaped

    # The comparison operators below only provide a stable ordering for
    # descriptors; comparing against unrelated objects yields False.
    def __gt__(self, other):
        try:
            return self._cmp_name > other._cmp_name
        except AttributeError:
            return False

    def __lt__(self, other):
        try:
            return self._cmp_name < other._cmp_name
        except AttributeError:
            return False

    def __le__(self, other):
        try:
            return self._cmp_name <= other._cmp_name
        except AttributeError:
            return False
class FileSourceDescriptor(SourceDescriptor):
    """
    Represents a code source. A code source is a more generic abstraction
    for a "filename" (as sometimes the code doesn't come from a file).
    Instances of code sources are passed to Scanner.__init__ as the
    optional name argument and will be passed back when asking for
    the position()-tuple.
    """
    def __init__(self, filename, path_description=None):
        filename = Utils.decode_filename(filename)
        # path_description is what error messages show; defaults to the path.
        self.path_description = path_description or filename
        self.filename = filename
        self.set_file_type_from_name(filename)
        self._cmp_name = filename
        # (encoding, error_handling) -> lines cache; None marks "read once".
        self._lines = {}

    def get_lines(self, encoding=None, error_handling=None):
        """Return the file's lines, caching them on the second request."""
        # we cache the lines only the second time this is called, in
        # order to save memory when they are only used once
        key = (encoding, error_handling)
        try:
            lines = self._lines[key]
            if lines is not None:
                return lines
        except KeyError:
            pass
        f = Utils.open_source_file(
            self.filename, encoding=encoding,
            error_handling=error_handling,
            # newline normalisation is costly before Py2.6
            require_normalised_newlines=False)
        try:
            lines = list(f)
        finally:
            f.close()
        if key in self._lines:
            # Second read of the same file: cache for subsequent calls.
            self._lines[key] = lines
        else:
            # do not cache the first access, but remember that we
            # already read it once
            self._lines[key] = None
        return lines

    def get_description(self):
        return self.path_description

    def get_error_description(self):
        """Return the path relative to the current directory when below it."""
        path = self.filename
        cwd = Utils.decode_filename(os.getcwd() + os.path.sep)
        if path.startswith(cwd):
            return path[len(cwd):]
        return path

    def get_filenametable_entry(self):
        return self.filename

    def __eq__(self, other):
        return isinstance(other, FileSourceDescriptor) and self.filename == other.filename

    def __hash__(self):
        return hash(self.filename)

    def __repr__(self):
        return "<FileSourceDescriptor:%s>" % self.filename
class StringSourceDescriptor(SourceDescriptor):
    """
    Instances of this class can be used instead of a filenames if the
    code originates from a string object.
    """
    filename = None

    def __init__(self, name, code):
        self.name = name
        #self.set_file_type_from_name(name)
        # Keep a trailing newline on every line, as file iteration would.
        self.codelines = [x + "\n" for x in code.split("\n")]
        self._cmp_name = name

    def get_lines(self, encoding=None, error_handling=None):
        """Return the code lines, round-tripped through *encoding* if given."""
        if not encoding:
            return self.codelines
        else:
            return [ line.encode(encoding, error_handling).decode(encoding)
                     for line in self.codelines ]

    def get_description(self):
        return self.name

    get_error_description = get_description

    def get_filenametable_entry(self):
        return "stringsource"

    def __hash__(self):
        return id(self)
        # Do not hash on the name, an identical string source should be the
        # same object (name is often defaulted in other places)
        # return hash(self.name)

    def __eq__(self, other):
        return isinstance(other, StringSourceDescriptor) and self.name == other.name

    def __repr__(self):
        return "<StringSourceDescriptor:%s>" % self.name
#------------------------------------------------------------------
class PyrexScanner(Scanner):
    """Tokenizer for Cython (.pyx/.pxd) and Python source.

    Extends the generic Plex Scanner with keyword recognition,
    indentation tracking (INDENT/DEDENT tokens), string-literal
    sub-states and the compile-time (DEF/IF) environment.
    """
    #  context            Context  Compilation context
    #  included_files     [string] Files included with 'include' statement
    #  compile_time_env   dict     Environment for conditional compilation
    #  compile_time_eval  boolean  In a true conditional compilation context
    #  compile_time_expr  boolean  In a compile-time expression context

    def __init__(self, file, filename, parent_scanner=None,
                 scope=None, context=None, source_encoding=None,
                 parse_comments=True, initial_pos=None):
        Scanner.__init__(self, get_lexicon(), file, filename, initial_pos)
        if parent_scanner:
            # Scanner for an include file: share the parent's state.
            self.context = parent_scanner.context
            self.included_files = parent_scanner.included_files
            self.compile_time_env = parent_scanner.compile_time_env
            self.compile_time_eval = parent_scanner.compile_time_eval
            self.compile_time_expr = parent_scanner.compile_time_expr
        else:
            self.context = context
            self.included_files = scope.included_files
            self.compile_time_env = initial_compile_time_env()
            self.compile_time_eval = 1
            self.compile_time_expr = 0
            # User-supplied DEF values override the initial environment.
            if hasattr(context.options, 'compile_time_env') and \
                    context.options.compile_time_env is not None:
                self.compile_time_env.update(context.options.compile_time_env)
        self.parse_comments = parse_comments
        self.source_encoding = source_encoding
        # .py files only reserve the Python keywords; .pyx/.pxd add Cython's.
        if filename.is_python_file():
            self.in_python_file = True
            self.keywords = set(py_reserved_words)
        else:
            self.in_python_file = False
            self.keywords = set(pyx_reserved_words)
        self.trace = trace_scanner
        self.indentation_stack = [0]
        self.indentation_char = None
        self.bracket_nesting_level = 0
        self.begin('INDENT')
        self.sy = ''
        self.next()

    def commentline(self, text):
        # Comments only become tokens when explicitly requested.
        if self.parse_comments:
            self.produce('commentline', text)

    def current_level(self):
        """Return the current indentation level (top of the stack)."""
        return self.indentation_stack[-1]

    def open_bracket_action(self, text):
        self.bracket_nesting_level = self.bracket_nesting_level + 1
        return text

    def close_bracket_action(self, text):
        self.bracket_nesting_level = self.bracket_nesting_level - 1
        return text

    def newline_action(self, text):
        # Newlines inside brackets are not statement terminators.
        if self.bracket_nesting_level == 0:
            self.begin('INDENT')
            self.produce('NEWLINE', '')

    # Maps the quote that opens a string to the scanner state handling it.
    string_states = {
        "'": 'SQ_STRING',
        '"': 'DQ_STRING',
        "'''": 'TSQ_STRING',
        '"""': 'TDQ_STRING'
    }

    def begin_string_action(self, text):
        # Strip any string prefix characters (b, u, r, ...) to find the quote.
        while text[:1] in any_string_prefix:
            text = text[1:]
        self.begin(self.string_states[text])
        self.produce('BEGIN_STRING')

    def end_string_action(self, text):
        self.begin('')
        self.produce('END_STRING')

    def unclosed_string_action(self, text):
        self.end_string_action(text)
        self.error("Unclosed string literal")

    def indentation_action(self, text):
        """Emit INDENT/DEDENT tokens for a change of indentation level."""
        self.begin('')
        # Indentation within brackets should be ignored.
        #if self.bracket_nesting_level > 0:
        #    return
        # Check that tabs and spaces are being used consistently.
        if text:
            c = text[0]
            #print "Scanner.indentation_action: indent with", repr(c) ###
            if self.indentation_char is None:
                # First indented line fixes the indentation character.
                self.indentation_char = c
                #print "Scanner.indentation_action: setting indent_char to", repr(c)
            else:
                if self.indentation_char != c:
                    self.error("Mixed use of tabs and spaces")
            if text.replace(c, "") != "":
                self.error("Mixed use of tabs and spaces")
        # Figure out how many indents/dedents to do
        current_level = self.current_level()
        new_level = len(text)
        #print "Changing indent level from", current_level, "to", new_level ###
        if new_level == current_level:
            return
        elif new_level > current_level:
            #print "...pushing level", new_level ###
            self.indentation_stack.append(new_level)
            self.produce('INDENT', '')
        else:
            while new_level < self.current_level():
                #print "...popping level", self.indentation_stack[-1] ###
                self.indentation_stack.pop()
                self.produce('DEDENT', '')
            #print "...current level now", self.current_level() ###
            if new_level != self.current_level():
                self.error("Inconsistent indentation")

    def eof_action(self, text):
        # Close all open indentation blocks before signalling EOF.
        while len(self.indentation_stack) > 1:
            self.produce('DEDENT', '')
            self.indentation_stack.pop()
        self.produce('EOF', '')

    def next(self):
        """Advance to the next token, updating self.sy and self.systring."""
        try:
            sy, systring = self.read()
        except UnrecognizedInput:
            self.error("Unrecognized character")
        if sy == IDENT:
            if systring in self.keywords:
                if systring == u'print' and print_function in self.context.future_directives:
                    # Under the print_function future, 'print' is an identifier.
                    self.keywords.discard('print')
                    systring = EncodedString(systring)
                elif systring == u'exec' and self.context.language_level >= 3:
                    # In Py3 mode, 'exec' is an identifier as well.
                    self.keywords.discard('exec')
                    systring = EncodedString(systring)
                else:
                    # Keywords use the keyword text itself as token type.
                    sy = systring
            else:
                systring = EncodedString(systring)
        self.sy = sy
        self.systring = systring
        if False: # debug_scanner:
            _, line, col = self.position()
            if not self.systring or self.sy == self.systring:
                t = self.sy
            else:
                t = "%s %s" % (self.sy, self.systring)
            print("--- %3d %2d %s" % (line, col, t))

    def peek(self):
        """Return the next token pair without consuming it."""
        saved = self.sy, self.systring
        self.next()
        next = self.sy, self.systring
        self.unread(*next)
        self.sy, self.systring = saved
        return next

    def put_back(self, sy, systring):
        """Push the current token back and make (sy, systring) current."""
        self.unread(self.sy, self.systring)
        self.sy = sy
        self.systring = systring

    def unread(self, token, value):
        # This method should be added to Plex
        self.queue.insert(0, (token, value))

    def error(self, message, pos = None, fatal = True):
        """Report a scanning error at *pos*; raise it when *fatal*."""
        if pos is None:
            pos = self.position()
        if self.sy == 'INDENT':
            err = error(pos, "Possible inconsistent indentation")
        err = error(pos, message)
        if fatal: raise err

    def expect(self, what, message = None):
        """Consume token *what* or report an error."""
        if self.sy == what:
            self.next()
        else:
            self.expected(what, message)

    def expect_keyword(self, what, message = None):
        """Consume the identifier *what* or report an error."""
        if self.sy == IDENT and self.systring == what:
            self.next()
        else:
            self.expected(what, message)

    def expected(self, what, message = None):
        """Report a mismatch between the expected and the found token."""
        if message:
            self.error(message)
        else:
            if self.sy == IDENT:
                found = self.systring
            else:
                found = self.sy
            self.error("Expected '%s', found '%s'" % (what, found))

    def expect_indent(self):
        self.expect('INDENT',
                    "Expected an increase in indentation level")

    def expect_dedent(self):
        self.expect('DEDENT',
                    "Expected a decrease in indentation level")

    def expect_newline(self, message = "Expected a newline"):
        # Expect either a newline or end of file
        if self.sy != 'EOF':
            self.expect('NEWLINE', message)
|
shanot/imp | refs/heads/develop | modules/em2d/test/test_images_opencv.py | 2 | from __future__ import print_function
import IMP
import IMP.test
import IMP.em2d as em2d
import os
import random
class Tests(IMP.test.TestCase):
    """Tests for em2d image I/O and image-processing operations."""

    def test_read_and_write_opencv_images(self):
        """Test read/write for the images using OpenCV data storage"""
        srw = em2d.SpiderImageReaderWriter()
        img = em2d.Image()
        img.read(
            self.get_input_file_name("1gyt-subject-1-0.5-SNR.spi"), srw)
        rows = int(img.get_header().get_number_of_rows())
        cols = int(img.get_header().get_number_of_columns())
        self.assertEqual(rows, 128, "Error reading image")
        self.assertEqual(cols, 128, "Error reading image")
        temp = "opencv_test_image.spi"
        img.write(temp, srw)
        img2 = em2d.Image()
        img2.read(temp, srw)
        for i in range(0, rows):
            for j in range(0, cols):
                self.assertAlmostEqual(img(i, j), img2(i, j), delta=0.001,
                                       msg="Generated image is different from stored")
        os.remove(temp)

    def test_variance_filter(self):
        """Test that the variance filter is working"""
        srw = em2d.SpiderImageReaderWriter()
        img = em2d.Image()
        img.read(
            self.get_input_file_name("1z5s-projection-2.spi"), srw)
        filtered = em2d.Image()
        kernelsize = 7
        em2d.apply_variance_filter(img, filtered, kernelsize)
        saved = em2d.Image()
        saved.read(
            self.get_input_file_name("filtered_image.spi"), srw)
        rows = int(img.get_header().get_number_of_rows())
        cols = int(img.get_header().get_number_of_columns())
        for i in range(0, rows):
            for j in range(0, cols):
                self.assertAlmostEqual(
                    saved(i, j), filtered(i, j), delta=0.001,
                    msg="Generated image is different from stored")

    def test_substract(self):
        """Test subtracting images"""
        rows = int(10)
        cols = int(5)
        img1 = em2d.Image(rows, cols)
        img2 = em2d.Image(rows, cols)
        result = em2d.Image(rows, cols)
        for i in range(0, rows):
            for j in range(0, cols):
                img1.set_value(i, j, random.uniform(-1, 1))
                img2.set_value(i, j, img1(i, j))
        em2d.do_subtract_images(img1, img2, result)
        for i in range(0, rows):
            for j in range(0, cols):
                self.assertAlmostEqual(abs(result(i, j)), 0, delta=0.001,
                                       msg="Subtract images error")

    def test_polar_resampling(self):
        """Test of polar resampling of images"""
        srw = em2d.SpiderImageReaderWriter()
        fn_input = self.get_input_file_name("1gyt-subject-1-0.5-SNR.spi")
        img = em2d.Image(fn_input, srw)
        polar_params = em2d.PolarResamplingParameters()
        polar = em2d.Image()
        em2d.do_resample_polar(img, polar, polar_params)
        fn_saved = self.get_input_file_name("1gyt-subject-1-0.5-SNR-polar.spi")
        saved = em2d.Image(fn_saved, srw)
        rows = int(polar.get_header().get_number_of_rows())
        cols = int(polar.get_header().get_number_of_columns())
        for i in range(0, rows):
            for j in range(0, cols):
                self.assertAlmostEqual(saved(i, j), polar(i, j), delta=0.001,
                                       msg="Generated polar image is different from stored"
                                       " row %d col %d" % (i, j))

    def test_read_jpg(self):
        """Test of JPGReaderWriter reading"""
        srw = em2d.SpiderImageReaderWriter()
        jrw = em2d.JPGImageReaderWriter()
        fn_jpg_img = self.get_input_file_name("lena-256x256.jpg")
        jpg_img = em2d.Image(fn_jpg_img, jrw)
        fn_spider_img = self.get_input_file_name("lena-256x256.spi")
        spider_img = em2d.Image(fn_spider_img, srw)
        rows = int(jpg_img.get_header().get_number_of_rows())
        cols = int(jpg_img.get_header().get_number_of_columns())
        self.assertEqual(spider_img.get_header().get_number_of_rows(), rows)
        self.assertEqual(spider_img.get_header().get_number_of_columns(), cols)
        for i in range(0, rows):
            for j in range(0, cols):
                # due to rounding, integer numbers in the jpg file can vary
                # to the next integer. Allow delta 1
                self.assertAlmostEqual(
                    abs(spider_img(i, j) - jpg_img(i, j)), 0,
                    delta=1, msg="JPG image is not equal to spider image "
                    "at pixel (%d,%d)" % (i, j))

    def test_write_jpg(self):
        """Test of JPGReaderWriter writing"""
        jrw = em2d.JPGImageReaderWriter()
        fn_img1 = self.get_input_file_name("lena-256x256.jpg")
        img1 = em2d.Image(fn_img1, jrw)
        fn_img2 = "temp.jpg"
        img1.write(fn_img2, jrw)
        img2 = em2d.Image(fn_img2, jrw)
        # Use the ccc for testing instead of the pixel values. The matrix
        # in img2 is transformed from floats to ints so it can be written.
        # Values can change, but the ccc has to be very close to 1.
        ccc = em2d.get_cross_correlation_coefficient(img1.get_data(),
                                                     img2.get_data())
        self.assertAlmostEqual(ccc, 1, delta=0.05,
                               msg="Written JPG image is not equal to read ")
        os.remove(fn_img2)

    def test_write_error_jpg(self):
        """Test that writing with JPGReaderWriter fails with bad extension"""
        jrw = em2d.JPGImageReaderWriter()
        fn_img1 = self.get_input_file_name("lena-256x256.jpg")
        img1 = em2d.Image(fn_img1, jrw)
        self.assertRaises(IOError, img1.write, "temp.xxx", jrw)

    def test_read_tiff(self):
        """Test of TIFFReaderWriter reading"""
        srw = em2d.SpiderImageReaderWriter()
        trw = em2d.TIFFImageReaderWriter()
        fn_tif_img = self.get_input_file_name("lena-256x256.tif")
        tif_img = em2d.Image(fn_tif_img, trw)
        fn_spider_img = self.get_input_file_name("lena-256x256.spi")
        spider_img = em2d.Image(fn_spider_img, srw)
        rows = int(tif_img.get_header().get_number_of_rows())
        cols = int(tif_img.get_header().get_number_of_columns())
        self.assertEqual(spider_img.get_header().get_number_of_rows(), rows)
        self.assertEqual(spider_img.get_header().get_number_of_columns(), cols)
        ccc = em2d.get_cross_correlation_coefficient(tif_img.get_data(),
                                                     spider_img.get_data())
        self.assertAlmostEqual(ccc, 1, delta=0.01, msg="ccc ins not 1")

    def test_write_tiff(self):
        """Test of TIFFReaderWriter writing"""
        trw = em2d.TIFFImageReaderWriter()
        fn_img1 = self.get_input_file_name("lena-256x256.tif")
        img1 = em2d.Image(fn_img1, trw)
        fn_img2 = "temp.tif"
        img1.write(fn_img2, trw)
        img2 = em2d.Image(fn_img2, trw)
        # Use the ccc for testing instead of the pixel values. The matrix
        # in img2 is transformed from floats to ints so it can be written.
        # Values can change, but the ccc has to be very close to 1.
        ccc = em2d.get_cross_correlation_coefficient(img1.get_data(),
                                                     img2.get_data())
        print(ccc)
        self.assertAlmostEqual(ccc, 1, delta=0.01,
                               msg="Written TIFF image is not equal to read ")
        os.remove(fn_img2)

    def test_write_error_tiff(self):
        """Test that writing with TIFFReaderWriter fails with bad extension"""
        trw = em2d.TIFFImageReaderWriter()
        fn_img1 = self.get_input_file_name("lena-256x256.tif")
        img1 = em2d.Image(fn_img1, trw)
        self.assertRaises(IOError, img1.write, "temp.xxx", trw)

    def test_do_extend_borders(self):
        """Test that extending the borders of an image is done correctly"""
        srw = em2d.SpiderImageReaderWriter()
        fn_img1 = self.get_input_file_name("lena-256x256.spi")
        img1 = em2d.Image(fn_img1, srw)
        img2 = em2d.Image()
        border = 10
        em2d.do_extend_borders(img1, img2, border)
        rows2 = int(img2.get_header().get_number_of_rows())
        cols2 = int(img2.get_header().get_number_of_columns())
        self.assertEqual(rows2, 256 + 2 * border,
                         "Border rows are not extended properly")
        self.assertEqual(cols2, 256 + 2 * border,
                         "Border columns are not extended properly")
        for i in range(0, rows2):
            for j in range(0, cols2):
                # Border pixels are the first and last `border` rows/columns.
                # Fixed two bugs here: the upper bound used `>` instead of
                # `>=` (off by one), and the column check compared `i`
                # against the column bound, so the right border was never
                # actually verified.
                if i < border or i >= rows2 - border:
                    self.assertAlmostEqual(img2(i, j), 0, delta=0.001,
                                           msg="Borders are not zero at row %d col %d" % (i, j))
                if j < border or j >= cols2 - border:
                    self.assertAlmostEqual(img2(i, j), 0, delta=0.001,
                                           msg="Borders are not zero at row %d col %d" % (i, j))

    def test_get_overlap_percentage(self):
        """ Test that the function returns > 0 when there is overlap, and 0
            otherwise """
        # TODO
        pass

    def test_crop(self):
        """Test cropping an image down to a given size"""
        img = em2d.Image()
        img.set_size(20, 20)
        em2d.crop(img, [10, 10], 10)
        self.assertEqual(img.get_header().get_number_of_rows(), 10,
                         "Crop size is incorrect")
        #em2d.crop(img,[10,10], 20);

    def test_fill_outside_circle(self):
        """Test that pixels outside the mask are set to the inside mean"""
        img = em2d.Image()
        size = 20
        radius = 6
        center = size / 2
        img.set_size(size, size)
        img.set_zeros()
        n = 0
        mean = 0
        border = 4
        for i in range(border, size - border + 1):
            for j in range(border, size - border + 1):
                if ((i - center) ** 2 + (j - center) ** 2) ** 0.5 <= radius:
                    val = i * j
                    img.set_value(i, j, val)
                    n += 1
                    mean += val
        # float() keeps the expected mean exact under Python 2 as well
        # (only print_function is imported from __future__, not division).
        mean /= float(n)
        em2d.apply_mean_outside_mask(img, radius)
        pix = range(0, size)
        for i in pix:
            for j in pix:
                if ((i - center) ** 2 + (j - center) ** 2) ** 0.5 <= radius:
                    val = i * j
                    self.assertEqual(img(i, j), val)
                else:
                    self.assertAlmostEqual(img(i, j), mean, delta=0.01)
if __name__ == '__main__':
IMP.test.main()
|
kevinmel2000/sl4a | refs/heads/master | python/src/Mac/Modules/ibcarbon/IBCarbonscan.py | 34 | # IBCarbonscan.py
import sys
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner_OSX
def main():
    """Scan IBCarbonRuntime.h and generate the IBCarbon wrapper sources.

    Writes the generator module (IBCarbongen.py) and the constants file
    under TOOLBOXDIR, then sanity-checks both outputs.
    """
    print "---Scanning IBCarbonRuntime.h---"
    input = ["IBCarbonRuntime.h"]
    output = "IBCarbongen.py"
    defsoutput = TOOLBOXDIR + "IBCarbonRuntime.py"
    scanner = IBCarbon_Scanner(input, output, defsoutput)
    scanner.scan()
    scanner.close()
    print "=== Testing definitions output code ==="
    # Execute the generated definitions file to verify it is valid Python.
    execfile(defsoutput, {}, {})
    print "--done scanning, importing--"
    # Importing the support module exercises the generated glue code.
    import IBCarbonsupport
    print "done"
class IBCarbon_Scanner(Scanner_OSX):
    """Scanner that classifies IBCarbon API functions for code generation."""

    def destination(self, type, name, arglist):
        # Functions whose first argument is an input IBNibRef become
        # methods of the generated object; everything else is a function.
        classname = "IBCarbonFunction"
        listname = "functions"
        if arglist:
            t, n, m = arglist[0]
            if t == "IBNibRef" and m == "InMode":
                classname = "IBCarbonMethod"
                listname = "methods"
        return classname, listname

    def makeblacklistnames(self):
        # Names that must not get generated wrappers.
        return [
            "DisposeNibReference",            # taken care of by destructor
            "CreateNibReferenceWithCFBundle", ## need to wrap CFBundle.h properly first
            ]

    def makerepairinstructions(self):
        return []
if __name__ == "__main__":
main()
|
kalvdans/scipy | refs/heads/master | scipy/io/harwell_boeing/hb.py | 83 | """
Implementation of Harwell-Boeing read/write.
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
from __future__ import division, print_function, absolute_import
# TODO:
# - Add more support (symmetric/complex matrices, non-assembled matrices ?)
# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
# takes a lot of memory. Being faster would require compiled code.
# write is not efficient. Although not a terribly exciting task,
# having reusable facilities to efficiently read/write fortran-formatted files
# would be useful outside this module.
import warnings
import numpy as np
from scipy.sparse import csc_matrix
from scipy.io.harwell_boeing._fortran_format_parser import \
FortranFormatParser, IntFormat, ExpFormat
from scipy._lib.six import string_types
__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
"HBMatrixType"]
class MalformedHeader(Exception):
    """Raised when a Harwell-Boeing file header does not follow the format."""
    pass
class LineOverflow(Warning):
    """Warned when a value exceeds the fixed-width Harwell-Boeing line format."""
    pass
def _nbytes_full(fmt, nlines):
"""Return the number of bytes to read to get every full lines for the
given parsed fortran format."""
return (fmt.repeat * fmt.width + 1) * (nlines - 1)
class HBInfo(object):
    """Parsed/derived metadata describing a Harwell-Boeing file's header."""

    @classmethod
    def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
        """Create a HBInfo instance from an existing sparse matrix.

        Parameters
        ----------
        m : sparse matrix
            the HBInfo instance will derive its parameters from m
        title : str
            Title to put in the HB header
        key : str
            Key
        mxtype : HBMatrixType
            type of the input matrix
        fmt : dict
            not implemented

        Returns
        -------
        hb_info : HBInfo instance
        """
        pointer = m.indptr
        indices = m.indices
        values = m.data

        nrows, ncols = m.shape
        nnon_zeros = m.nnz

        if fmt is None:
            # +1 because HB use one-based indexing (Fortran), and we will write
            # the indices /pointer as such
            pointer_fmt = IntFormat.from_number(np.max(pointer+1))
            indices_fmt = IntFormat.from_number(np.max(indices+1))

            if values.dtype.kind in np.typecodes["AllFloat"]:
                # Negative sample number forces a format wide enough for signs.
                values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
            elif values.dtype.kind in np.typecodes["AllInteger"]:
                values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
            else:
                raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
        else:
            raise NotImplementedError("fmt argument not supported yet.")

        if mxtype is None:
            if not np.isrealobj(values):
                raise ValueError("Complex values not supported yet")
            if values.dtype.kind in np.typecodes["AllInteger"]:
                tp = "integer"
            elif values.dtype.kind in np.typecodes["AllFloat"]:
                tp = "real"
            else:
                raise NotImplementedError("type %s for values not implemented"
                                          % values.dtype)
            mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
        else:
            raise ValueError("mxtype argument not handled yet.")

        def _nlines(fmt, size):
            # Number of lines needed to write `size` items, `fmt.repeat`
            # items per line (i.e. ceiling division).
            nlines = size // fmt.repeat
            if nlines * fmt.repeat != size:
                nlines += 1
            return nlines

        pointer_nlines = _nlines(pointer_fmt, pointer.size)
        indices_nlines = _nlines(indices_fmt, indices.size)
        values_nlines = _nlines(values_fmt, values.size)

        total_nlines = pointer_nlines + indices_nlines + values_nlines

        return cls(title, key,
            total_nlines, pointer_nlines, indices_nlines, values_nlines,
            mxtype, nrows, ncols, nnon_zeros,
            pointer_fmt.fortran_format, indices_fmt.fortran_format,
            values_fmt.fortran_format)

    @classmethod
    def from_file(cls, fid):
        """Create a HBInfo instance from a file object containg a matrix in the
        HB format.

        Parameters
        ----------
        fid : file-like matrix
            File or file-like object containing a matrix in the HB format.

        Returns
        -------
        hb_info : HBInfo instance
        """
        # First line: 72-char title followed by the key.
        line = fid.readline().strip("\n")
        if not len(line) > 72:
            raise ValueError("Expected at least 72 characters for first line, "
                             "got: \n%s" % line)
        title = line[:72]
        key = line[72:]

        # Second line: four 14-char line counts, optional rhs line count.
        line = fid.readline().strip("\n")
        if not len(line.rstrip()) >= 56:
            raise ValueError("Expected at least 56 characters for second line, "
                             "got: \n%s" % line)
        total_nlines = _expect_int(line[:14])
        pointer_nlines = _expect_int(line[14:28])
        indices_nlines = _expect_int(line[28:42])
        values_nlines = _expect_int(line[42:56])

        rhs_nlines = line[56:72].strip()
        if rhs_nlines == '':
            rhs_nlines = 0
        else:
            rhs_nlines = _expect_int(rhs_nlines)
        if not rhs_nlines == 0:
            raise ValueError("Only files without right hand side supported for "
                             "now.")

        # Third line: matrix type and dimensions.
        line = fid.readline().strip("\n")
        if not len(line) >= 70:
            raise ValueError("Expected at least 72 character for third line, got:\n"
                             "%s" % line)

        mxtype_s = line[:3].upper()
        if not len(mxtype_s) == 3:
            raise ValueError("mxtype expected to be 3 characters long")

        mxtype = HBMatrixType.from_fortran(mxtype_s)
        if mxtype.value_type not in ["real", "integer"]:
            raise ValueError("Only real or integer matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.structure == "unsymmetric":
            raise ValueError("Only unsymmetric matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.storage == "assembled":
            raise ValueError("Only assembled matrices supported for now")

        if not line[3:14] == " " * 11:
            raise ValueError("Malformed data for third line: %s" % line)

        nrows = _expect_int(line[14:28])
        ncols = _expect_int(line[28:42])
        nnon_zeros = _expect_int(line[42:56])
        nelementals = _expect_int(line[56:70])
        if not nelementals == 0:
            raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
                             % nelementals)

        # Fourth line: the three fortran format strings.
        line = fid.readline().strip("\n")
        ct = line.split()
        if not len(ct) == 3:
            raise ValueError("Expected 3 formats, got %s" % ct)

        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   ct[0], ct[1], ct[2],
                   rhs_nlines, nelementals)

    def __init__(self, title, key,
            total_nlines, pointer_nlines, indices_nlines, values_nlines,
            mxtype, nrows, ncols, nnon_zeros,
            pointer_format_str, indices_format_str, values_format_str,
            right_hand_sides_nlines=0, nelementals=0):
        """Do not use this directly, but the class ctrs (from_* functions)."""
        self.title = title
        self.key = key
        # NOTE(review): the fallback values assigned to the local variables
        # below are never written back to self.title/self.key, so a None
        # title/key stays None on the instance — looks unintended; verify
        # against dump() before relying on it.
        if title is None:
            title = "No Title"
        if len(title) > 72:
            raise ValueError("title cannot be > 72 characters")

        if key is None:
            key = "|No Key"
        if len(key) > 8:
            warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)

        self.total_nlines = total_nlines
        self.pointer_nlines = pointer_nlines
        self.indices_nlines = indices_nlines
        self.values_nlines = values_nlines

        parser = FortranFormatParser()
        pointer_format = parser.parse(pointer_format_str)
        if not isinstance(pointer_format, IntFormat):
            raise ValueError("Expected int format for pointer format, got %s"
                             % pointer_format)

        indices_format = parser.parse(indices_format_str)
        if not isinstance(indices_format, IntFormat):
            raise ValueError("Expected int format for indices format, got %s" %
                             indices_format)

        values_format = parser.parse(values_format_str)
        if isinstance(values_format, ExpFormat):
            if mxtype.value_type not in ["real", "complex"]:
                raise ValueError("Inconsistency between matrix type %s and "
                                 "value type %s" % (mxtype, values_format))
            values_dtype = np.float64
        elif isinstance(values_format, IntFormat):
            if mxtype.value_type not in ["integer"]:
                raise ValueError("Inconsistency between matrix type %s and "
                                 "value type %s" % (mxtype, values_format))
            # XXX: fortran int -> dtype association ?
            values_dtype = int
        else:
            raise ValueError("Unsupported format for values %r" % (values_format,))

        self.pointer_format = pointer_format
        self.indices_format = indices_format
        self.values_format = values_format

        self.pointer_dtype = np.int32
        self.indices_dtype = np.int32
        self.values_dtype = values_dtype

        self.pointer_nlines = pointer_nlines
        self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)

        self.indices_nlines = indices_nlines
        self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)

        self.values_nlines = values_nlines
        self.values_nbytes_full = _nbytes_full(values_format, values_nlines)

        self.nrows = nrows
        self.ncols = ncols
        self.nnon_zeros = nnon_zeros
        self.nelementals = nelementals
        self.mxtype = mxtype

    def dump(self):
        """Gives the header corresponding to this instance as a string."""
        header = [self.title.ljust(72) + self.key.ljust(8)]

        header.append("%14d%14d%14d%14d" %
                      (self.total_nlines, self.pointer_nlines,
                       self.indices_nlines, self.values_nlines))
        header.append("%14s%14d%14d%14d%14d" %
                      (self.mxtype.fortran_format.ljust(14), self.nrows,
                       self.ncols, self.nnon_zeros, 0))

        pffmt = self.pointer_format.fortran_format
        iffmt = self.indices_format.fortran_format
        vffmt = self.values_format.fortran_format
        header.append("%16s%16s%20s" %
                      (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
        return "\n".join(header)
def _expect_int(value, msg=None):
try:
return int(value)
except ValueError:
if msg is None:
msg = "Expected an int, got %s"
raise ValueError(msg % value)
def _read_hb_data(content, header):
# XXX: look at a way to reduce memory here (big string creation)
ptr_string = "".join([content.read(header.pointer_nbytes_full),
content.readline()])
ptr = np.fromstring(ptr_string,
dtype=int, sep=' ')
ind_string = "".join([content.read(header.indices_nbytes_full),
content.readline()])
ind = np.fromstring(ind_string,
dtype=int, sep=' ')
val_string = "".join([content.read(header.values_nbytes_full),
content.readline()])
val = np.fromstring(val_string,
dtype=header.values_dtype, sep=' ')
try:
return csc_matrix((val, ind-1, ptr-1),
shape=(header.nrows, header.ncols))
except ValueError as e:
raise e
def _write_data(m, fid, header):
def write_array(f, ar, nlines, fmt):
# ar_nlines is the number of full lines, n is the number of items per
# line, ffmt the fortran format
pyfmt = fmt.python_format
pyfmt_full = pyfmt * fmt.repeat
# for each array to write, we first write the full lines, and special
# case for partial line
full = ar[:(nlines - 1) * fmt.repeat]
for row in full.reshape((nlines-1, fmt.repeat)):
f.write(pyfmt_full % tuple(row) + "\n")
nremain = ar.size - full.size
if nremain > 0:
f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")
fid.write(header.dump())
fid.write("\n")
# +1 is for fortran one-based indexing
write_array(fid, m.indptr+1, header.pointer_nlines,
header.pointer_format)
write_array(fid, m.indices+1, header.indices_nlines,
header.indices_format)
write_array(fid, m.data, header.values_nlines,
header.values_format)
class HBMatrixType(object):
    """Class to hold the matrix type (value type, structure, storage)."""
    # q2f* translates qualified names to fortran character
    _q2f_type = {
        "real": "R",
        "complex": "C",
        "pattern": "P",
        "integer": "I",
    }
    _q2f_structure = {
        "symmetric": "S",
        "unsymmetric": "U",
        "hermitian": "H",
        "skewsymmetric": "Z",
        "rectangular": "R"
    }
    _q2f_storage = {
        "assembled": "A",
        "elemental": "E",
    }

    # f2q* are the inverse mappings: fortran character -> qualified name.
    # (dict comprehension replaces the dict([...]) idiom; the iterable is
    # evaluated in class scope, so this is valid inside a class body.)
    _f2q_type = {fortran: name for name, fortran in _q2f_type.items()}
    _f2q_structure = {fortran: name for name, fortran in _q2f_structure.items()}
    _f2q_storage = {fortran: name for name, fortran in _q2f_storage.items()}

    @classmethod
    def from_fortran(cls, fmt):
        """Build an instance from a 3-character Fortran type string, e.g. 'RUA'.

        Raises ValueError for strings of the wrong length or with
        unrecognized characters.
        """
        if not len(fmt) == 3:
            raise ValueError("Fortran format for matrix type should be 3 "
                             "characters long")
        try:
            value_type = cls._f2q_type[fmt[0]]
            structure = cls._f2q_structure[fmt[1]]
            storage = cls._f2q_storage[fmt[2]]
            return cls(value_type, structure, storage)
        except KeyError:
            raise ValueError("Unrecognized format %s" % fmt)

    def __init__(self, value_type, structure, storage="assembled"):
        """
        Parameters
        ----------
        value_type : str
            One of 'real', 'complex', 'pattern', 'integer'.
        structure : str
            One of 'symmetric', 'unsymmetric', 'hermitian',
            'skewsymmetric', 'rectangular'.
        storage : str, optional
            'assembled' (default) or 'elemental'.
        """
        self.value_type = value_type
        self.structure = structure
        self.storage = storage

        # Validate against the known qualifier tables.
        if value_type not in self._q2f_type:
            raise ValueError("Unrecognized type %s" % value_type)
        if structure not in self._q2f_structure:
            raise ValueError("Unrecognized structure %s" % structure)
        if storage not in self._q2f_storage:
            raise ValueError("Unrecognized storage %s" % storage)

    @property
    def fortran_format(self):
        """The 3-character Fortran code for this matrix type."""
        return self._q2f_type[self.value_type] + \
               self._q2f_structure[self.structure] + \
               self._q2f_storage[self.storage]

    def __repr__(self):
        return "HBMatrixType(%s, %s, %s)" % \
               (self.value_type, self.structure, self.storage)
class HBFile(object):
    """Wrapper around a Harwell-Boeing file exposing header metadata and
    matrix read/write operations."""

    def __init__(self, file, hb_info=None):
        """Create a HBFile instance.

        Parameters
        ----------
        file : file-object
            StringIO work as well
        hb_info : HBInfo, optional
            Should be given as an argument for writing, in which case the file
            should be writable.
        """
        self._fid = file
        # Reading: parse the header from the stream.  Writing: keep the
        # caller-supplied header as-is.
        self._hb_info = HBInfo.from_file(file) if hb_info is None else hb_info

    @property
    def title(self):
        """Title field of the header."""
        return self._hb_info.title

    @property
    def key(self):
        """Key field of the header."""
        return self._hb_info.key

    @property
    def type(self):
        """Value type of the matrix (e.g. 'real')."""
        return self._hb_info.mxtype.value_type

    @property
    def structure(self):
        """Structure of the matrix (e.g. 'unsymmetric')."""
        return self._hb_info.mxtype.structure

    @property
    def storage(self):
        """Storage scheme of the matrix (e.g. 'assembled')."""
        return self._hb_info.mxtype.storage

    def read_matrix(self):
        """Read and return the sparse matrix stored in the file."""
        return _read_hb_data(self._fid, self._hb_info)

    def write_matrix(self, m):
        """Write the sparse matrix *m* to the file."""
        return _write_data(m, self._fid, self._hb_info)
def hb_read(file):
    """Read HB-format file.

    Parameters
    ----------
    file : str-like or file-like
        If a string-like object, file is the name of the file to read. If a
        file-like object, the data are read from it.

    Returns
    -------
    data : scipy.sparse.csc_matrix instance
        The data read from the HB file as a sparse matrix.

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    def _get_matrix(fid):
        return HBFile(fid).read_matrix()

    # File-like objects are consumed directly; filenames are opened and
    # closed here.
    if not isinstance(file, string_types):
        return _get_matrix(file)
    fid = open(file)
    try:
        return _get_matrix(fid)
    finally:
        fid.close()
def hb_write(file, m, hb_info=None):
    """Write HB-format file.

    Parameters
    ----------
    file : str-like or file-like
        if a string-like object, file is the name of the file to read. If a
        file-like object, the data are read from it.
    m : sparse-matrix
        the sparse matrix to write
    hb_info : HBInfo
        contains the meta-data for write

    Returns
    -------
    None

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    # Derive header metadata from the matrix when none is given.
    if hb_info is None:
        hb_info = HBInfo.from_data(m)

    def _set_matrix(fid):
        return HBFile(fid, hb_info).write_matrix(m)

    # File-like objects are written directly; filenames are opened and
    # closed here.
    if not isinstance(file, string_types):
        return _set_matrix(file)
    fid = open(file, "w")
    try:
        return _set_matrix(fid)
    finally:
        fid.close()
|
intgr/django | refs/heads/master | tests/select_related_onetoone/models.py | 106 | from django.db import models
class User(models.Model):
    # Basic user with a name and an email address.
    username = models.CharField(max_length=100)
    email = models.EmailField()

    def __str__(self):
        return self.username
class UserProfile(models.Model):
    # One-to-one profile extension of User holding location fields.
    user = models.OneToOneField(User, models.CASCADE)
    city = models.CharField(max_length=100)
    state = models.CharField(max_length=2)

    def __str__(self):
        return "%s, %s" % (self.city, self.state)
class UserStatResult(models.Model):
    # Standalone results record referenced by UserStat below.
    results = models.CharField(max_length=50)

    def __str__(self):
        return 'UserStatResults, results = %s' % (self.results,)
class UserStat(models.Model):
    # Stats record whose primary key is the related User itself.
    user = models.OneToOneField(User, models.CASCADE, primary_key=True)
    posts = models.IntegerField()
    results = models.ForeignKey(UserStatResult, models.CASCADE)

    def __str__(self):
        return 'UserStat, posts = %s' % (self.posts,)
class StatDetails(models.Model):
    # Extra detail record attached one-to-one to a UserStat.
    base_stats = models.OneToOneField(UserStat, models.CASCADE)
    comments = models.IntegerField()

    def __str__(self):
        return 'StatDetails, comments = %s' % (self.comments,)
class AdvancedUserStat(UserStat):
    # Multi-table-inheritance child of UserStat adding a karma counter.
    karma = models.IntegerField()
class Image(models.Model):
    # Simple named image, referenced by Product via a nullable one-to-one.
    name = models.CharField(max_length=100)
class Product(models.Model):
    # Product with an optional image; deleting the image nulls the link.
    name = models.CharField(max_length=100)
    image = models.OneToOneField(Image, models.SET_NULL, null=True)
class Parent1(models.Model):
    # First parent model used in the inheritance tests below.
    name1 = models.CharField(max_length=50)

    def __str__(self):
        return self.name1
class Parent2(models.Model):
    # Avoid having two "id" fields in the Child1 subclass
    id2 = models.AutoField(primary_key=True)
    name2 = models.CharField(max_length=50)

    def __str__(self):
        return self.name2
class Child1(Parent1, Parent2):
    # Inherits from both parents (Parent2 uses id2 to avoid a clashing
    # 'id' column, per the comment on Parent2).
    value = models.IntegerField()

    def __str__(self):
        return self.name1
class Child2(Parent1):
    # Inherits Parent1 and links to Parent2 through an explicit one-to-one
    # field instead of inheritance.
    parent2 = models.OneToOneField(Parent2, models.CASCADE)
    value = models.IntegerField()

    def __str__(self):
        return self.name1
class Child3(Child2):
    # Second-level inheritance below Child2.
    value3 = models.IntegerField()
class Child4(Child1):
    # Second-level inheritance below Child1.
    value4 = models.IntegerField()
class LinkedList(models.Model):
    # Node in a singly-linked chain; the reverse accessor of previous_item
    # is exposed as next_item.
    name = models.CharField(max_length=50)
    previous_item = models.OneToOneField(
        'self', models.CASCADE,
        related_name='next_item',
        blank=True, null=True,
    )
|
TeppieC/M-ords | refs/heads/master | mords_backend/mords_api/migrations/0002_auto_20161108_0630.py | 2 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-08 06:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.2: updates model Meta ordering, renames
    # fields on book/learner, and adds a pub_date timestamp to note.

    dependencies = [
        ('mords_api', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='book',
            options={'ordering': ['name']},
        ),
        migrations.AlterModelOptions(
            name='note',
            options={'ordering': ['word', 'text', 'author']},
        ),
        migrations.RenameField(
            model_name='book',
            old_name='bookName',
            new_name='name',
        ),
        migrations.RenameField(
            model_name='learner',
            old_name='vocab_book',
            new_name='book',
        ),
        migrations.RenameField(
            model_name='learner',
            old_name='words_perday',
            new_name='words_perDay',
        ),
        migrations.AddField(
            model_name='note',
            name='pub_date',
            # A default is required so existing rows get a value; it is then
            # dropped from the model via preserve_default=False.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
|
CalebBell/thermo | refs/heads/master | docs/plots/PR_maximum_pressure.py | 1 | import matplotlib.pyplot as plt
import numpy as np
from thermo import *
from fluids.numerics import logspace
from math import log10
# Reference Peng-Robinson object; its liquid molar volume V_l is held fixed
# for every point on the curve below.
thing = PR(P=1e5, T=460, Tc=658.0, Pc=1820000.0, omega=0.9)

# 100 temperatures spaced log-uniformly between 1e3 K and 2e6 K.
Ts = logspace(log10(1000), log10(2000000), 100)


def _pressure_at_constant_volume(T):
    """Pressure at temperature T holding V at the reference liquid volume."""
    try:
        return thing.to(T=T, V=thing.V_l).P
    except ValueError:
        # No valid solution at this temperature; fall back to the base P.
        return 1e5


Ps = [_pressure_at_constant_volume(T) for T in Ts]

plt.loglog(Ts, Ps)
plt.xlabel('Temperature [K]')
plt.ylabel('Pressure [Pa]')
plt.title("Constant-volume calculated pressure at V=%.8f m^3/mol" %(thing.V_l))
#plt.show()
|
bratsche/Neutron-Drive | refs/heads/master | google_appengine/lib/django_1_2/django/conf/urls/i18n.py | 383 | from django.conf.urls.defaults import *
urlpatterns = patterns('',
    # Endpoint that stores the user's chosen language for the session.
    (r'^setlang/$', 'django.views.i18n.set_language'),
)
|
sa2ajj/DistroTracker | refs/heads/upstream | pts/project/__init__.py | 1 | # Copyright 2013 The Distro Tracker Developers
# See the COPYRIGHT file at the top-level directory of this distribution and
# at http://deb.li/DTAuthors
#
# This file is part of Distro Tracker. It is subject to the license terms
# in the LICENSE file found in the top-level directory of this
# distribution and at http://deb.li/DTLicense. No part of Distro Tracker,
# including this file, may be copied, modified, propagated, or distributed
# except according to the terms contained in the LICENSE file.
"""Package containing the main project settings."""
|
thinkopensolutions/l10n-brazil | refs/heads/10.0 | l10n_br_account/tests/__init__.py | 3 | # -*- coding: utf-8 -*-
# Copyright (C) 2017 Daniel Sadamo - KMEE INFORMATICA LTDA
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import test_account_invoice
|
liaoyuke/EECS542_Final_Project_VQA | refs/heads/master | code/concept_extraction/lib/en/parser/nltk_lite/contrib/marshalbrill.py | 9 | # Natural Language Toolkit: Brill Tagger
#
# Copyright (C) 2001-2005 University of Pennsylvania
# Authors: Christopher Maloof <cjmaloof@gradient.cis.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Brill's transformational rule-based tagger.
"""
from en.parser.nltk_lite.tag import TagI
import bisect # for binary search through a subset of indices
import os # for finding WSJ files
import random # for shuffling WSJ files
import sys # for getting command-line arguments
import re # for performing regular expression matching
######################################################################
## The Brill Tagger
######################################################################
class Brill(TagI):
    """
    Brill's transformational rule-based tagger.  Brill taggers use an
    X{initial tagger} (such as L{tag.Default}) to assign an initial
    tag sequence to a text; and then apply an ordered list of
    transformational rules to correct the tags of individual tokens.
    These transformation rules are specified by the L{BrillRuleI}
    interface.

    Brill taggers can be created directly, from an initial tagger and
    a list of transformational rules; but more often, Brill taggers
    are created by learning rules from a training corpus, using either
    L{BrillTrainer} or L{FastBrillTrainer}.
    """

    # TODO: move into __init__() when all marshalling classes will be moved into
    # standard tree
    _classname = "BrillTagger"

    def __init__(self, initial_tagger, rules):
        """
        @param initial_tagger: The initial tagger
        @type initial_tagger: L{TagI}
        @param rules: An ordered list of transformation rules that
            should be used to correct the initial tagging.
        @type rules: C{list} of L{BrillRuleI}
        """
        self._initial_tagger = initial_tagger
        self._rules = rules

    def rules(self):
        # Return a shallow copy so callers cannot mutate the rule list.
        return self._rules[:]

    def tag(self, tokens):
        # Inherit documentation from TagI

        # Run the initial tagger.
        tagged_tokens = list(self._initial_tagger.tag(tokens))

        # Create a dictionary that maps each tag to a list of the
        # indices of tokens that have that tag.
        tag_to_positions = {}
        for i, (token, tag) in enumerate(tagged_tokens):
            if tag not in tag_to_positions:
                tag_to_positions[tag] = set([i])
            else:
                tag_to_positions[tag].add(i)

        # Apply each rule, in order.  Only try to apply rules at
        # positions that have the desired original tag.
        for rule in self._rules:
            # Find the positions where it might apply
            positions = tag_to_positions.get(rule.original_tag(), [])
            # Apply the rule at those positions.
            changed = rule.apply_at(tagged_tokens, positions)
            # Update tag_to_positions with the positions of tags that
            # were modified.
            for i in changed:
                tag_to_positions[rule.original_tag()].remove(i)
                if rule.replacement_tag() not in tag_to_positions:
                    tag_to_positions[rule.replacement_tag()] = set([i])
                else:
                    tag_to_positions[rule.replacement_tag()].add(i)

        # Yield the corrected (token, tag) pairs lazily.
        for t in tagged_tokens:
            yield t

    # marshal() and unmarshal() methods by Tiago Tresoldi <tresoldi@users.sf.net>

    def marshal(self, filename):
        """
        Marshals (saves to a plain text file) the tagger model.

        @param filename: Name of the file to which save the model (will
            be overwritten if it already exists).
        @type filename: C{string}
        """
        # NOTE(review): 'file' is the Python 2 builtin; this module is
        # Python 2 code throughout.
        handler = file(filename, "w")
        for rule in self.rules():
            handler.write("%s\n" % rule)
        handler.close()

    def unmarshal(self, filename):
        """
        Unmarshals (loads from a plain text file) the tagger model. This
        operation will override any previously stored rules.

        @param filename: Name of the file from which the model will
            be read.
        @type filename: C{string}
        """
        # rule_a matches the explicit-range phrasing produced by
        # ProximateTokensRule.__str__ ("... of words i-2...i+1 ...");
        # rule_b matches the single-word phrasing ("the preceding word" /
        # "the following word").
        rule_a = re.compile(r"^(.+) -> (.+) if the (.+) of words i([+-]\d+)...i([+-]\d+) is '(.+)'$", re.UNICODE)
        rule_b = re.compile(r"^(.+) -> (.+) if the (.+) of the (.+) word is '(.+)'$", re.UNICODE)

        # erase any previous rules
        self._rules = []

        # load from file
        handler = file(filename, "r")
        lines = handler.readlines()
        handler.close()

        # remove '\n's, even though $ would catch them
        lines = [line[:-1] for line in lines]

        # remove empty lines
        lines = [line for line in lines if len(line)>0]

        # parse rules
        for rule in lines:
            match = re.match(rule_b, rule)
            if match:
                groups = list( match.groups() )
                # Normalize the single-word phrasing into explicit
                # (start, end) string offsets at positions 3 and 4.
                if groups[3] == "preceding":
                    groups.pop(3)
                    groups.insert(3, "-1")
                    groups.insert(4, "-1")
                else:
                    groups.pop(3)
                    groups.insert(3, "1")
                    groups.insert(4, "1")
            else:
                # NOTE(review): if neither pattern matches, match is None and
                # the next line raises AttributeError; malformed input is not
                # handled here.
                match = re.match(rule_a, rule)
                groups = list( match.groups() )

            conditions = (int(groups[3]), int(groups[4]), groups[5])

            if groups[2] == "tag":
                r = ProximateTagsRule(groups[0], groups[1], conditions)
            else:
                r = ProximateWordsRule(groups[0], groups[1], conditions)

            self._rules.append(r)
######################################################################
## Brill Rules
######################################################################
class BrillRuleI(object):
    """
    An interface for tag transformations on a tagged corpus, as
    performed by brill taggers.  Each transformation finds all tokens
    in the corpus that are tagged with a specific X{original tag} and
    satisfy a specific X{condition}, and replaces their tags with a
    X{replacement tag}.  For any given transformation, the original
    tag, replacement tag, and condition are fixed.  Conditions may
    depend on the token under consideration, as well as any other
    tokens in the corpus.

    Brill rules must be comparable and hashable.
    """
    def apply_to(self, tokens):
        """
        Apply this rule everywhere it applies in the corpus.  I.e.,
        for each token in the corpus that is tagged with this rule's
        original tag, and that satisfies this rule's condition, set
        its tag to be this rule's replacement tag.

        @param tokens: The tagged corpus
        @type tokens: C{list} of C{tuple}
        @return: The indices of tokens whose tags were changed by this
            rule.
        @rtype: C{list} of C{int}
        """
        # Default implementation: try every position.
        return self.apply_at(tokens, range(len(tokens)))

    def apply_at(self, tokens, positions):
        """
        Apply this rule at every position in C{positions} where it
        applies to the corpus.  I.e., for each position M{p} in
        C{positions}, if C{tokens[M{p}]} is tagged with this rule's
        original tag, and satisfies this rule's condition, then set
        its tag to be this rule's replacement tag.

        @param tokens: The tagged corpus
        @type tokens: list of Token
        @type positions: C{list} of C{int}
        @param positions: The positions where the transformation is to
            be tried.
        @return: The indices of tokens whose tags were changed by this
            rule.
        @rtype: C{int}
        """
        assert False, "BrillRuleI is an abstract interface"

    def applies(self, tokens, index):
        """
        @return: True if the rule would change the tag of
            C{tokens[index]}, False otherwise
        @rtype: Boolean

        @param tokens: A tagged corpus
        @type tokens: list of Token
        @param index: The index to check
        @type index: int
        """
        assert False, "BrillRuleI is an abstract interface"

    def original_tag(self):
        """
        @return: The tag which this C{BrillRuleI} may cause to be
            replaced.
        @rtype: any
        """
        assert False, "BrillRuleI is an abstract interface"

    def replacement_tag(self):
        """
        @return: the tag with which this C{BrillRuleI} may replace
            another tag.
        @rtype: any
        """
        assert False, "BrillRuleI is an abstract interface"

    # Rules must be comparable and hashable for the algorithm to work
    def __eq__(self, other):
        # Fixed: __eq__ must accept the object compared against; the
        # original signature omitted 'other', so any comparison raised
        # TypeError instead of this deliberate assertion failure.
        assert False, "Brill rules must be comparable"

    def __hash__(self):
        assert False, "Brill rules must be hashable"
class ProximateTokensRule(BrillRuleI):
    """
    An abstract base class for brill rules whose condition checks for
    the presence of tokens with given properties at given ranges of
    positions, relative to the token.

    Each subclass of proximate tokens brill rule defines a method
    M{extract_property}, which extracts a specific property from the
    the token, such as its text or tag.  Each instance is
    parameterized by a set of tuples, specifying ranges of positions
    and property values to check for in those ranges:

      - (M{start}, M{end}, M{value})

    The brill rule is then applicable to the M{n}th token iff:

      - The M{n}th token is tagged with the rule's original tag; and
      - For each (M{start}, M{end}, M{value}) triple:
        - The property value of at least one token between
          M{n+start} and M{n+end} (inclusive) is M{value}.

    For example, a proximate token brill template with M{start=end=-1}
    generates rules that check just the property of the preceding
    token.  Note that multiple properties may be included in a single
    rule; the rule applies if they all hold.
    """

    def __init__(self, original_tag, replacement_tag, *conditions):
        """
        Construct a new brill rule that changes a token's tag from
        C{original_tag} to C{replacement_tag} if all of the properties
        specified in C{conditions} hold.

        @type conditions: C{tuple} of C{(int, int, *)}
        @param conditions: A list of 3-tuples C{(start, end, value)},
            each of which specifies that the property of at least one
            token between M{n}+C{start} and M{n}+C{end} (inclusive) is
            C{value}.
        @raise ValueError: If C{start}>C{end} for any condition.
        """
        assert self.__class__ != ProximateTokensRule, \
               "ProximateTokensRule is an abstract base class"
        self._original = original_tag
        self._replacement = replacement_tag
        self._conditions = conditions
        # Reject empty/backwards ranges up front.
        for (s,e,v) in conditions:
            if s>e:
                raise ValueError('Condition %s has an invalid range' %
                                 ((s,e,v),))

    def extract_property(token): # [staticmethod]
        """
        Returns some property characterizing this token, such as its
        base lexical item or its tag.

        Each implementation of this method should correspond to an
        implementation of the method with the same name in a subclass
        of L{ProximateTokensTemplate}.

        @param token: The token
        @type token: Token
        @return: The property
        @rtype: any
        """
        assert False, "ProximateTokensRule is an abstract interface"
    # Pre-decorator staticmethod idiom, kept for consistency with the
    # rest of this module.
    extract_property = staticmethod(extract_property)

    def apply_at(self, tokens, positions):
        # Inherit docs from BrillRuleI

        # Find all locations where the rule is applicable
        change = []
        for i in positions:
            if self.applies(tokens, i):
                change.append(i)

        # Make the changes.  Note: this must be done in a separate
        # step from finding applicable locations, since we don't want
        # the rule to interact with itself.
        for i in change:
            (token, tag) = tokens[i]
            tokens[i] = (token, self._replacement)

        return change

    def applies(self, tokens, index):
        # Inherit docs from BrillRuleI

        # Does the given token have this rule's "original tag"?
        if tokens[index][1] != self._original:
            return False

        # Check to make sure that every condition holds.
        for (start, end, val) in self._conditions:
            # Find the (absolute) start and end indices, clamped to the
            # corpus bounds.
            s = max(0, index+start)
            e = min(index+end+1, len(tokens))

            # Look for *any* token that satisfies the condition.
            for i in range(s, e):
                if self.extract_property(tokens[i]) == val:
                    break
            else:
                # No token satisfied the condition; return false.
                return False

        # Every condition checked out, so the rule is applicable.
        return True

    def original_tag(self):
        # Inherit docs from BrillRuleI
        return self._original

    def replacement_tag(self):
        # Inherit docs from BrillRuleI
        return self._replacement

    def __eq__(self, other):
        # Rules are equal iff they are the same concrete class with the
        # same tags and conditions.
        return (other != None and
                other.__class__ == self.__class__ and
                self._original == other._original and
                self._replacement == other._replacement and
                self._conditions == other._conditions)

    def __hash__(self):
        # Needs to include extract_property in order to distinguish subclasses
        # A nicer way would be welcome.
        # NOTE(review): func_code is the Python 2 attribute name; Python 3
        # renamed it to __code__.
        return hash( (self._original, self._replacement, self._conditions,
                      self.extract_property.func_code) )

    def __repr__(self):
        conditions = ' and '.join(['%s in %d...%d' % (v,s,e)
                                   for (s,e,v) in self._conditions])
        return '<%s: %s->%s if %s>' % (self.__class__.__name__,
                                       self._original, self._replacement,
                                       conditions)

    def __str__(self):
        # This human-readable form is also the marshalled on-disk format
        # parsed back by Brill.unmarshal().
        replacement = '%s -> %s' % (self._original,
                                    self._replacement)
        if len(self._conditions) == 0:
            conditions = ''
        else:
            conditions = ' if '+ ', and '.join([self._condition_to_str(c)
                                                for c in self._conditions])
        return replacement+conditions

    def _condition_to_str(self, condition):
        """
        Return a string representation of the given condition.
        This helper method is used by L{__str__}.
        """
        (start, end, value) = condition
        return ('the %s of %s is %r' %
                (self.PROPERTY_NAME, self._range_to_str(start, end), value))

    def _range_to_str(self, start, end):
        """
        Return a string representation for the given range.  This
        helper method is used by L{__str__}.
        """
        if start == end == 0:
            return 'this word'
        if start == end == -1:
            return 'the preceding word'
        elif start == end == 1:
            return 'the following word'
        elif start == end and start < 0:
            return 'word i-%d' % -start
        elif start == end and start > 0:
            return 'word i+%d' % start
        else:
            if start >= 0: start = '+%d' % start
            if end >= 0: end = '+%d' % end
            return 'words i%s...i%s' % (start, end)
class ProximateTagsRule(ProximateTokensRule):
    """
    A rule which examines the tags of nearby tokens.

    @see: superclass L{ProximateTokensRule} for details.
    @see: L{ProximateTagsTemplate}, which generates these rules.
    """
    PROPERTY_NAME = 'tag'  # name used when printing this rule's conditions

    @staticmethod
    def extract_property(token):
        """@return: The given token's tag."""
        return token[1]
class ProximateWordsRule(ProximateTokensRule):
    """
    A rule which examines the base types of nearby tokens.

    @see: L{ProximateTokensRule} for details.
    @see: L{ProximateWordsTemplate}, which generates these rules.
    """
    PROPERTY_NAME = 'text'  # name used when printing this rule's conditions

    @staticmethod
    def extract_property(token):
        """@return: The given token's text."""
        return token[0]
######################################################################
## Brill Templates
######################################################################
class BrillTemplateI(object):
    """
    An interface for generating lists of transformational rules that
    apply at given corpus positions.  C{BrillTemplateI} is used by
    C{Brill} training algorithms to generate candidate rules.
    """
    # NOTE(review): this module uses the Python 2 'raise Class, msg' syntax;
    # it is a syntax error under Python 3.
    def __init__(self):
        raise AssertionError, "BrillTemplateI is an abstract interface"

    def applicable_rules(self, tokens, i, correctTag):
        """
        Return a list of the transformational rules that would correct
        the C{i}th subtoken's tag in the given token.  In particular,
        return a list of zero or more rules that would change
        C{tagged_tokens[i][1]} to C{correctTag}, if applied
        to C{token}.

        If the C{i}th subtoken already has the correct tag (i.e., if
        C{tagged_tokens[i][1]} == C{correctTag}), then
        C{applicable_rules} should return the empty list.

        @param token: The tagged tokens being tagged.
        @type token: C{list} of C{tuple}
        @param i: The index of the token whose tag should be corrected.
        @type i: C{int}
        @param correctTag: The correct tag for the C{i}th token.
        @type correctTag: (any)
        @rtype: C{list} of L{BrillRuleI}
        """
        raise AssertionError, "BrillTemplateI is an abstract interface"

    def get_neighborhood(self, token, index):
        """
        Returns the set of indices C{i} such that
        C{applicable_rules(token, index, ...)} depends on the value of
        the C{i}th subtoken of C{token}.

        This method is used by the \"fast\" Brill tagger trainer.

        @param token: The tokens being tagged.
        @type token: C{list} of C{tuple}
        @param index: The index whose neighborhood should be returned.
        @type index: C{int}
        @rtype: C{Set}
        """
        raise AssertionError, "BrillTemplateI is an abstract interface"
class ProximateTokensTemplate(BrillTemplateI):
    """
    An brill templates that generates a list of
    L{ProximateTokensRule}s that apply at a given corpus
    position.  In particular, each C{ProximateTokensTemplate} is
    parameterized by a proximate token brill rule class and a list of
    boundaries, and generates all rules that:

      - use the given brill rule class
      - use the given list of boundaries as the C{start} and C{end}
        points for their conditions
      - are applicable to the given token.
    """
    def __init__(self, rule_class, *boundaries):
        """
        Construct a template for generating proximate token brill
        rules.

        @type rule_class: C{class}
        @param rule_class: The proximate token brill rule class that
            should be used to generate new rules.  This class must be a
            subclass of L{ProximateTokensRule}.
        @type boundaries: C{tuple} of C{(int, int)}
        @param boundaries: A list of tuples C{(start, end)}, each of
            which specifies a range for which a condition should be
            created by each rule.
        @raise ValueError: If C{start}>C{end} for any boundary.
        """
        self._rule_class = rule_class
        self._boundaries = boundaries
        # Reject empty/backwards ranges immediately.
        for (s,e) in boundaries:
            if s>e:
                raise ValueError('Boundary %s has an invalid range' %
                                 ((s,e),))

    def applicable_rules(self, tokens, index, correct_tag):
        # Nothing to correct if the token is already tagged correctly.
        if tokens[index][1] == correct_tag:
            return []

        # For each of this template's boundaries, Find the conditions
        # that are applicable for the given token.
        applicable_conditions = \
            [self._applicable_conditions(tokens, index, start, end)
             for (start, end) in self._boundaries]

        # Find all combinations of these applicable conditions.  E.g.,
        # if applicable_conditions=[[A,B], [C,D]], then this will
        # generate [[A,C], [A,D], [B,C], [B,D]].
        condition_combos = [[]]
        for conditions in applicable_conditions:
            condition_combos = [old_conditions+[new_condition]
                                for old_conditions in condition_combos
                                for new_condition in conditions]

        # Translate the condition sets into rules.
        return [self._rule_class(tokens[index][1], correct_tag, *conds)
                for conds in condition_combos]

    def _applicable_conditions(self, tokens, index, start, end):
        """
        @return: A set of all conditions for proximate token rules
            that are applicable to C{tokens[index]}, given boundaries of
            C{(start, end)}.  I.e., return a list of all tuples C{(start,
            end, M{value})}, such the property value of at least one token
            between M{index+start} and M{index+end} (inclusive) is
            M{value}.
        """
        conditions = set()
        # Clamp the window to the corpus bounds.
        s = max(0, index+start)
        e = min(index+end+1, len(tokens))
        for i in range(s, e):
            value = self._rule_class.extract_property(tokens[i])
            conditions.add( (start, end, value) )
        return conditions

    def get_neighborhood(self, tokens, index):
        # inherit docs from BrillTemplateI
        # A rule generated here inspects every position inside each
        # (start, end) window around index, plus index itself.
        neighborhood = set([index])
        for (start, end) in self._boundaries:
            s = max(0, index+start)
            e = min(index+end+1, len(tokens))
            for i in range(s, e):
                neighborhood.add(i)
        return neighborhood
class SymmetricProximateTokensTemplate(BrillTemplateI):
    """
    Simulates two L{ProximateTokensTemplate}s which are symmetric
    across the location of the token.  For rules of the form \"If the
    M{n}th token is tagged C{A}, and any tag preceding B{or} following
    the M{n}th token by a distance between M{x} and M{y} is C{B}, and
    ... , then change the tag of the nth token from C{A} to C{C}.\"

    One C{ProximateTokensTemplate} is formed by passing in the
    same arguments given to this class's constructor: tuples
    representing intervals in which a tag may be found.  The other
    C{ProximateTokensTemplate} is constructed with the negative
    of all the arguments in reversed order.  For example, a
    C{SymmetricProximateTokensTemplate} using the pair (-2,-1) and the
    constructor C{ProximateTagsTemplate} generates the same rules as a
    C{ProximateTagsTemplate} using (-2,-1) plus a second
    C{ProximateTagsTemplate} using (1,2).

    This is useful because we typically don't want templates to
    specify only \"following\" or only \"preceding\"; we'd like our
    rules to be able to look in either direction.
    """
    def __init__(self, rule_class, *boundaries):
        """
        Construct a template for generating proximate token brill
        rules.

        @type rule_class: C{class}
        @param rule_class: The proximate token brill rule class that
            should be used to generate new rules.  This class must be a
            subclass of L{ProximateTokensRule}.
        @type boundaries: C{tuple} of C{(int, int)}
        @param boundaries: A list of tuples C{(start, end)}, each of
            which specifies a range for which a condition should be
            created by each rule.
        @raise ValueError: If C{start}>C{end} for any boundary.
        """
        self._ptt1 = ProximateTokensTemplate(rule_class, *boundaries)
        # Mirror each (start, end) interval to the opposite side of the
        # token.  (Renamed from 'reversed', which shadowed the builtin.)
        mirrored = [(-e, -s) for (s, e) in boundaries]
        self._ptt2 = ProximateTokensTemplate(rule_class, *mirrored)

    # Generates lists of a subtype of ProximateTokensRule.
    def applicable_rules(self, tokens, index, correctTag):
        """
        See L{BrillTemplateI} for full specifications.

        @rtype: list of ProximateTokensRule
        """
        # Combine the rules generated by the forward- and backward-looking
        # templates.
        return (self._ptt1.applicable_rules(tokens, index, correctTag) +
                self._ptt2.applicable_rules(tokens, index, correctTag))

    def get_neighborhood(self, tokens, index):
        # inherit docs from BrillTemplateI
        n1 = self._ptt1.get_neighborhood(tokens, index)
        n2 = self._ptt2.get_neighborhood(tokens, index)
        return n1.union(n2)
######################################################################
## Brill Tagger Trainer
######################################################################
class BrillTrainer(object):
"""
A trainer for brill taggers.
"""
    def __init__(self, initial_tagger, templates, trace=0):
        # initial_tagger supplies the starting tag sequence; templates
        # generate candidate correction rules; trace controls verbosity
        # (0 = silent, higher values print progress).
        self._initial_tagger = initial_tagger
        self._templates = templates
        self._trace = trace
#////////////////////////////////////////////////////////////
# Training
#////////////////////////////////////////////////////////////
def train(self, train_tokens, max_rules=200, min_score=2):
"""
Trains the Brill tagger on the corpus C{train_token},
producing at most C{max_rules} transformations, each of which
reduces the net number of errors in the corpus by at least
C{min_score}.
@type train_tokens: C{list} of L{tuple}
@param train_tokens: The corpus of tagged tokens
@type max_rules: C{int}
@param max_rules: The maximum number of transformations to be created
@type min_score: C{int}
@param min_score: The minimum acceptable net error reduction
that each transformation must produce in the corpus.
"""
if self._trace > 0: print ("Training Brill tagger on %d tokens..." %
len(train_tokens))
# Create a new copy of the training token, and run the initial
# tagger on this. We will progressively update this test
# token to look more like the training token.
test_tokens = list(self._initial_tagger.tag(t[0] for t in train_tokens))
if self._trace > 2: self._trace_header()
# Look for useful rules.
rules = []
try:
while len(rules) < max_rules:
old_tags = [t[1] for t in test_tokens]
(rule, score, fixscore) = self._best_rule(test_tokens,
train_tokens)
if rule is None or score < min_score:
if self._trace > 1:
print 'Insufficient improvement; stopping'
break
else:
# Add the rule to our list of rules.
rules.append(rule)
# Use the rules to update the test token.
k = rule.apply_to(test_tokens)
# Display trace output.
if self._trace > 1:
self._trace_rule(rule, score, fixscore, len(k))
# The user can also cancel training manually:
except KeyboardInterrupt: pass
# Create and return a tagger from the rules we found.
return Brill(self._initial_tagger, rules)
#////////////////////////////////////////////////////////////
# Finding the best rule
#////////////////////////////////////////////////////////////
# Finds the rule that makes the biggest net improvement in the corpus.
# Returns a (rule, score) pair.
def _best_rule(self, test_tokens, train_tokens):
# Create a dictionary mapping from each tag to a list of the
# indices that have that tag in both test_tokens and
# train_tokens (i.e., where it is correctly tagged).
correct_indices = {}
for i in range(len(test_tokens)):
if test_tokens[i][1] == train_tokens[i][1]:
tag = test_tokens[i][1]
correct_indices.setdefault(tag, []).append(i)
# Find all the rules that correct at least one token's tag,
# and the number of tags that each rule corrects (in
# descending order of number of tags corrected).
rules = self._find_rules(test_tokens, train_tokens)
# Keep track of the current best rule, and its score.
best_rule, best_score, best_fixscore = None, 0, 0
# Consider each rule, in descending order of fixscore (the
# number of tags that the rule corrects, not including the
# number that it breaks).
for (rule, fixscore) in rules:
# The actual score must be <= fixscore; so if best_score
# is bigger than fixscore, then we already have the best
# rule.
if best_score >= fixscore:
return best_rule, best_score, best_fixscore
# Calculate the actual score, by decrementing fixscore
# once for each tag that the rule changes to an incorrect
# value.
score = fixscore
if correct_indices.has_key(rule.original_tag()):
for i in correct_indices[rule.original_tag()]:
if rule.applies(test_tokens, i):
score -= 1
# If the score goes below best_score, then we know
# that this isn't the best rule; so move on:
if score <= best_score: break
#print '%5d %5d %s' % (fixscore, score, rule)
# If the actual score is better than the best score, then
# update best_score and best_rule.
if score > best_score:
best_rule, best_score, best_fixscore = rule, score, fixscore
# Return the best rule, and its score.
return best_rule, best_score, best_fixscore
def _find_rules(self, test_tokens, train_tokens):
"""
Find all rules that correct at least one token's tag in
C{test_tokens}.
@return: A list of tuples C{(rule, fixscore)}, where C{rule}
is a brill rule and C{fixscore} is the number of tokens
whose tag the rule corrects. Note that C{fixscore} does
I{not} include the number of tokens whose tags are changed
to incorrect values.
"""
# Create a list of all indices that are incorrectly tagged.
error_indices = [i for i in range(len(test_tokens))
if (test_tokens[i][1] !=
train_tokens[i][1])]
# Create a dictionary mapping from rules to their positive-only
# scores.
rule_score_dict = {}
for i in range(len(test_tokens)):
rules = self._find_rules_at(test_tokens, train_tokens, i)
for rule in rules:
rule_score_dict[rule] = rule_score_dict.get(rule,0) + 1
# Convert the dictionary into a list of (rule, score) tuples,
# sorted in descending order of score.
rule_score_items = rule_score_dict.items()
temp = [(-score, rule) for (rule, score) in rule_score_items]
temp.sort()
return [(rule, -negscore) for (negscore, rule) in temp]
def _find_rules_at(self, test_tokens, train_tokens, i):
"""
@rtype: C{Set}
@return: the set of all rules (based on the templates) that
correct token C{i}'s tag in C{test_tokens}.
"""
applicable_rules = set()
if test_tokens[i][1] != train_tokens[i][1]:
correct_tag = train_tokens[i][1]
for template in self._templates:
new_rules = template.applicable_rules(test_tokens, i,
correct_tag)
applicable_rules.update(new_rules)
return applicable_rules
#////////////////////////////////////////////////////////////
# Tracing
#////////////////////////////////////////////////////////////
def _trace_header(self):
print """
B |
S F r O | Score = Fixed - Broken
c i o t | R Fixed = num tags changed incorrect -> correct
o x k h | u Broken = num tags changed correct -> incorrect
r e e e | l Other = num tags changed incorrect -> incorrect
e d n r | e
------------------+-------------------------------------------------------
""".rstrip()
def _trace_rule(self, rule, score, fixscore, numchanges):
if self._trace > 2:
print ('%4d%4d%4d%4d ' % (score, fixscore, fixscore-score,
numchanges-fixscore*2+score)), '|',
print rule
######################################################################
## Fast Brill Tagger Trainer
######################################################################
class FastBrillTrainer(object):
    """
    A faster trainer for brill taggers.

    Instead of rescoring every candidate rule on each iteration, this
    trainer maintains incremental indices mapping rules to the positions
    where they apply (and vice versa), so only affected bookkeeping is
    updated when a rule is adopted.
    """
    def __init__(self, initial_tagger, templates, trace=0):
        # Same construction contract as BrillTrainer.
        self._initial_tagger = initial_tagger
        self._templates = templates
        self._trace = trace

    #////////////////////////////////////////////////////////////
    # Training
    #////////////////////////////////////////////////////////////

    def train(self, train_tokens, max_rules=200, min_score=2):
        """
        Trains on C{train_tokens}, producing at most C{max_rules}
        transformation rules, stopping once no remaining rule scores at
        least C{min_score}.  Returns a C{Brill} tagger built from the
        initial tagger plus the learned rules.
        """
        # If TESTING is true, extra computation is done to determine whether
        # each "best" rule actually reduces net error by the score it received.
        TESTING = False

        # Basic idea: Keep track of the rules that apply at each position.
        # And keep track of the positions to which each rule applies.

        # The set of somewhere-useful rules that apply at each position
        rulesByPosition = []
        for i in range(len(train_tokens)):
            rulesByPosition.append(set())

        # Mapping somewhere-useful rules to the positions where they apply.
        # Then maps each position to the score change the rule generates there.
        # (always -1, 0, or 1)
        positionsByRule = {}

        # Map scores to sets of rules known to achieve *at most* that score.
        # (Inner dicts are used as sets; values are always None.)
        rulesByScore = {0:{}}
        # Conversely, map somewhere-useful rules to their minimal scores.
        ruleScores = {}
        tagIndices = {}   # Lists of indices, mapped to by their tags

        # Maps rules to the first index in the corpus where it may not be known
        # whether the rule applies.  (Rules can't be chosen for inclusion
        # unless this value = len(corpus).  But most rules are bad, and
        # we won't need to check the whole corpus to know that.)
        # Some indices past this may actually have been checked; it just isn't
        # guaranteed.
        firstUnknownIndex = {}

        # Make entries in the rule-mapping dictionaries.
        # Should be called before _updateRuleApplies.
        def _initRule (rule):
            positionsByRule[rule] = {}
            rulesByScore[0][rule] = None
            ruleScores[rule] = 0
            firstUnknownIndex[rule] = 0

        # Takes a somewhere-useful rule which applies at index i;
        # Updates all rule data to reflect that the rule so applies.
        def _updateRuleApplies (rule, i):

            # If the rule is already known to apply here, ignore.
            # (This only happens if the position's tag hasn't changed.)
            if positionsByRule[rule].has_key(i):
                return

            # Record the score delta this application contributes:
            # +1 fixes an error, -1 introduces one, 0 wrong-to-wrong.
            if rule.replacement_tag() == train_tokens[i][1]:
                positionsByRule[rule][i] = 1
            elif rule.original_tag() == train_tokens[i][1]:
                positionsByRule[rule][i] = -1
            else: # was wrong, remains wrong
                positionsByRule[rule][i] = 0

            # Update rules in the other dictionaries: move the rule to the
            # score bucket matching its new total.
            del rulesByScore[ruleScores[rule]][rule]
            ruleScores[rule] += positionsByRule[rule][i]
            if not rulesByScore.has_key(ruleScores[rule]):
                rulesByScore[ruleScores[rule]] = {}
            rulesByScore[ruleScores[rule]][rule] = None
            rulesByPosition[i].add(rule)

        # Takes a rule which no longer applies at index i;
        # Updates all rule data to reflect that the rule doesn't apply.
        def _updateRuleNotApplies (rule, i):
            del rulesByScore[ruleScores[rule]][rule]
            ruleScores[rule] -= positionsByRule[rule][i]
            if not rulesByScore.has_key(ruleScores[rule]):
                rulesByScore[ruleScores[rule]] = {}
            rulesByScore[ruleScores[rule]][rule] = None

            del positionsByRule[rule][i]
            rulesByPosition[i].remove(rule)

            # Optional addition: if the rule now applies nowhere, delete
            # all its dictionary entries.

        # Run the initial tagger to get the starting (imperfect) tagging.
        tagged_tokens = list(self._initial_tagger.tag(t[0] for t in train_tokens))

        # First sort the corpus by tag, and also note where the errors are.
        errorIndices = []   # only used in initialization
        for i in range(len(tagged_tokens)):
            tag = tagged_tokens[i][1]
            if tag != train_tokens[i][1]:
                errorIndices.append(i)
            if not tagIndices.has_key(tag):
                tagIndices[tag] = []
            tagIndices[tag].append(i)

        print "Finding useful rules..."
        # Collect all rules that fix any errors, with their positive scores.
        for i in errorIndices:
            for template in self._templates:
                # Find the templated rules that could fix the error.
                for rule in template.applicable_rules(tagged_tokens, i,
                                                      train_tokens[i][1]):
                    if not positionsByRule.has_key(rule):
                        _initRule(rule)
                    _updateRuleApplies(rule, i)
        print "Done initializing %i useful rules." %len(positionsByRule)

        if TESTING:
            after = -1 # bug-check only

        # Each iteration through the loop tries a new maxScore.
        maxScore = max(rulesByScore.keys())
        rules = []
        while len(rules) < max_rules and maxScore >= min_score:

            # Find the next best rule.  This is done by repeatedly taking a rule with
            # the highest score and stepping through the corpus to see where it
            # applies.  When it makes an error (decreasing its score) it's bumped
            # down, and we try a new rule with the highest score.
            # When we find a rule which has the highest score AND which has been
            # tested against the entire corpus, we can conclude that it's the next
            # best rule.

            bestRule = None
            bestRules = rulesByScore[maxScore].keys()

            for rule in bestRules:
                # Find the first relevant index at or following the first
                # unknown index.  (Only check indices with the right tag.)
                ti = bisect.bisect_left(tagIndices[rule.original_tag()],
                                        firstUnknownIndex[rule])
                for nextIndex in tagIndices[rule.original_tag()][ti:]:
                    if rule.applies(tagged_tokens, nextIndex):
                        _updateRuleApplies(rule, nextIndex)
                        if ruleScores[rule] < maxScore:
                            firstUnknownIndex[rule] = nextIndex+1
                            break # the _update demoted the rule

                # If we checked all remaining indices and found no more errors:
                if ruleScores[rule] == maxScore:
                    firstUnknownIndex[rule] = len(tagged_tokens) # i.e., we checked them all
                    print "%i) %s (score: %i)" %(len(rules)+1, rule, maxScore)
                    bestRule = rule
                    break

            if bestRule == None: # all rules dropped below maxScore
                del rulesByScore[maxScore]
                maxScore = max(rulesByScore.keys())
                continue # with next-best rules

            # bug-check only
            if TESTING:
                before = len(_errorPositions(tagged_tokens, train_tokens))
                print "There are %i errors before applying this rule." %before
                assert after == -1 or before == after, \
                        "after=%i but before=%i" %(after,before)

            print "Applying best rule at %i locations..." \
                    %len(positionsByRule[bestRule].keys())

            # If we reach this point, we've found a new best rule.
            # Apply the rule at the relevant sites.
            # (apply_at is a little inefficient here, since we know the rule applies
            # and don't actually need to test it again.)
            rules.append(bestRule)
            bestRule.apply_at(tagged_tokens, positionsByRule[bestRule].keys())

            # Update the tag index accordingly.
            for i in positionsByRule[bestRule].keys(): # where it applied
                # Update positions of tags
                # First, find and delete the index for i from the old tag.
                oldIndex = bisect.bisect_left(tagIndices[bestRule.original_tag()], i)
                del tagIndices[bestRule.original_tag()][oldIndex]

                # Then, insert i into the index list of the new tag.
                if not tagIndices.has_key(bestRule.replacement_tag()):
                    tagIndices[bestRule.replacement_tag()] = []
                newIndex = bisect.bisect_left(tagIndices[bestRule.replacement_tag()], i)
                tagIndices[bestRule.replacement_tag()].insert(newIndex, i)

            # This part is tricky.
            # We need to know which sites might now require new rules -- that
            # is, which sites are close enough to the changed site so that
            # a template might now generate different rules for it.
            # Only the templates can know this.
            #
            # If a template now generates a different set of rules, we have
            # to update our indices to reflect that.
            print "Updating neighborhoods of changed sites.\n"

            # First, collect all the indices that might get new rules.
            neighbors = set()
            for i in positionsByRule[bestRule].keys(): # sites changed
                for template in self._templates:
                    neighbors.update(template.get_neighborhood(tagged_tokens, i))

            # Then collect the new set of rules for each such index.
            # c/d/e count obsolete applications, new ones, and brand-new rules.
            c = d = e = 0
            for i in neighbors:
                siteRules = set()
                for template in self._templates:
                    # Get a set of the rules that the template now generates
                    siteRules.update(set(template.applicable_rules(
                        tagged_tokens, i, train_tokens[i][1])))

                # Update rules no longer generated here by any template
                for obsolete in rulesByPosition[i] - siteRules:
                    c += 1
                    _updateRuleNotApplies(obsolete, i)

                # Update rules only now generated by this template
                for newRule in siteRules - rulesByPosition[i]:
                    d += 1
                    if not positionsByRule.has_key(newRule):
                        e += 1
                        _initRule(newRule) # make a new rule w/score=0
                    _updateRuleApplies(newRule, i) # increment score, etc.

            if TESTING:
                after = before - maxScore
            print "%i obsolete rule applications, %i new ones, " %(c,d)+ \
                    "using %i previously-unseen rules." %e

            maxScore = max(rulesByScore.keys()) # may have gone up

        # NOTE(review): the trace message below prints *after* training, and
        # rules_by_position is built but never used — both look like leftovers.
        if self._trace > 0: print ("Training Brill tagger on %d tokens..." %
                                   len(train_tokens))

        # Maintain a list of the rules that apply at each position.
        rules_by_position = [{} for tok in train_tokens]

        # Create and return a tagger from the rules we found.
        return Brill(self._initial_tagger, rules)
######################################################################
## Testing
######################################################################
def _errorPositions (train_tokens, tokens):
return [i for i in range(len(tokens))
if tokens[i][1] !=
train_tokens[i][1] ]
# returns a list of errors in string format
def errorList(train_tokens, tokens, radius=2):
    """
    Returns a list of human-readable strings indicating the errors in the
    given tagging of the corpus.

    @param train_tokens: The correct tagging of the corpus
    @type train_tokens: C{list} of C{tuple}
    @param tokens: The tagged corpus
    @type tokens: C{list} of C{tuple}
    @param radius: How many tokens on either side of a wrongly-tagged token
        to include in the error string.  For example, if C{radius}=2, each
        error string will show the incorrect token plus two tokens on
        either side.
    @type radius: int
    """
    errors = []
    total = len(tokens)
    for i in _errorPositions(train_tokens, tokens):
        # Header: "wrong-tag -> correct-tag: " followed by the context window.
        pieces = [tokens[i][1].rjust(3) + " -> " + train_tokens[i][1].rjust(3) + ": "]
        for j in range(max(i - radius, 0), min(i + radius + 1, total)):
            word, tag = tokens[j][0], tokens[j][1]
            # Punctuation is tagged as itself, so skip the "/tag" suffix.
            shown = word if word == tag else word + "/" + tag
            pieces.append("**" + shown + "** " if j == i else shown + " ")
        errors.append("".join(pieces))
    return errors
#####################################################################################
# Demonstration
#####################################################################################
def demo(num_sents=100, max_rules=200, min_score=2, error_output = "errors.out",
         rule_output="rules.out", randomize=False, train=.8, trace=3):
    """
    Brill Tagger Demonstration

    @param num_sents: how many sentences of training and testing data to use
    @type num_sents: L{int}
    @param max_rules: maximum number of rule instances to create
    @type max_rules: L{int}
    @param min_score: the minimum score for a rule in order for it to be considered
    @type min_score: L{int}
    @param error_output: the file where errors will be saved
    @type error_output: L{string}
    @param rule_output: the file where rules will be saved
    @type rule_output: L{string}
    @param randomize: whether the training data should be a random subset of the corpus
    @type randomize: L{boolean}
    @param train: the fraction of the corpus to be used for training (1=all)
    @type train: L{float}
    @param trace: the level of diagnostic tracing output to produce (0-3)
    @type trace: L{int}
    """
    from en.parser.nltk_lite.corpora import treebank
    from en.parser.nltk_lite import tag
    from en.parser.nltk_lite.tag import brill

    # Baseline tagger: numbers -> CD, everything else -> NN.
    NN_CD_tagger = tag.Regexp([(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')])

    # train is the proportion of data used in training; the rest is reserved
    # for testing.
    print "Loading tagged data..."
    sents = list(treebank.tagged())
    if randomize:
        # Seeding with the corpus size keeps the shuffle reproducible.
        random.seed(len(sents))
        random.shuffle(sents)

    # Flatten the sentences and split into training and held-out portions.
    tagged_data = [t for s in sents[:num_sents] for t in s]
    cutoff = int(len(tagged_data)*train)
    training_data = tagged_data[:cutoff]
    gold_data = tagged_data[cutoff:]
    testing_data = [t[0] for t in gold_data]

    # Unigram tagger
    print "Training unigram tagger:",
    u = tag.Unigram(backoff=NN_CD_tagger)

    # NB training and testing are required to use a list-of-lists structure,
    # so we wrap the flattened corpus data with the extra list structure.
    u.train([training_data])
    print("[accuracy: %f]" % tag.accuracy(u, [gold_data]))

    # Brill tagger: templates look at tag/word context windows of widths 1-3.
    templates = [
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,1)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (2,2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateTagsRule, (1,3)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,1)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (2,2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,2)),
        brill.SymmetricProximateTokensTemplate(brill.ProximateWordsRule, (1,3)),
        brill.ProximateTokensTemplate(brill.ProximateTagsRule, (-1, -1), (1,1)),
        brill.ProximateTokensTemplate(brill.ProximateWordsRule, (-1, -1), (1,1)),
        ]

    #trainer = brill.FastBrillTrainer(u, templates, trace)
    trainer = brill.BrillTrainer(u, templates, trace)
    b = trainer.train(training_data, max_rules, min_score)

    print
    print("Brill accuracy: %f" % tag.accuracy(b, [gold_data]))

    print("\nRules: ")
    # NOTE(review): printRules is never closed (file() is Python 2 only).
    printRules = file(rule_output, 'w')
    for rule in b.rules():
        print(str(rule))
        printRules.write(str(rule)+"\n\n")

    # Re-tag the held-out data and save a human-readable error report.
    testing_data = list(b.tag(testing_data))
    el = errorList(gold_data, testing_data)
    errorFile = file(error_output, 'w')

    for e in el:
        errorFile.write(e+"\n\n")
    errorFile.close()
    print "Done; rules and errors saved to %s and %s." % (rule_output, error_output)
# Run the demonstration when executed as a script.
if __name__ == '__main__':
    demo()
|
dzamie/weasyl | refs/heads/master | libweasyl/libweasyl/text.py | 1 | from __future__ import unicode_literals
import re
from lxml import etree, html
import misaka
from .compat import unicode
from .defang import defang
from .legacy import login_name
try:
from html.parser import locatestarttagend
except ImportError:
try:
from html.parser import locatestarttagend_tolerant as locatestarttagend
except ImportError:
from HTMLParser import locatestarttagend
def slug_for(title):
    """Build a URL slug: lowercase alphanumeric runs joined by hyphens.

    "&" is spelled out as "and" before slugging.
    """
    normalized = title.replace("&", " and ").lower()
    parts = re.findall(r"[a-z0-9]+", normalized)
    return "-".join(parts)
AUTOLINK_URL = (
r"(?P<url>(?isu)\b(?:https?://|www\d{,3}\.|[a-z0-9.-]+\.[a-z]{2,4}/)[^\s()"
r"<>\[\]\x02]+(?![^\s`!()\[\]{};:'\".,<>?\x02\xab\xbb\u201c\u201d\u2018"
r"\u2019]))"
)
url_regexp = re.compile(AUTOLINK_URL)
USER_LINK = re.compile(r"""
\\(?P<escaped>[\\<])
| <(?P<type>!~|[!~])(?P<username>[a-z0-9_]+)>
| .
""", re.I | re.X)
NON_USERNAME_CHARACTERS = re.compile("[^a-z0-9]+", re.I)
def _furaffinity(target):
return "".join(i for i in target if i not in "!#_" and not i.isspace()).lower()
def _inkbunny(target):
return target.lower()
def _deviantart(target):
return "".join(i for i in target if i != "." and not i.isspace()).lower()
def _sofurry(target):
    """Normalize a SoFurry username: collapse non-alphanumeric runs to '-',
    strip leading hyphens, lowercase."""
    slug = NON_USERNAME_CHARACTERS.sub("-", target)
    return slug.lstrip("-").lower()
# Misaka markdown extensions enabled for all Weasyl rendering.
MISAKA_EXT = (
    misaka.EXT_TABLES
    | misaka.EXT_FENCED_CODE
    | misaka.EXT_AUTOLINK
    | misaka.EXT_STRIKETHROUGH
    | misaka.EXT_NO_INTRA_EMPHASIS
    | misaka.EXT_LAX_SPACING
    | misaka.EXT_NO_INDENTED_CODE_BLOCKS)

# HTML renderer flags passed to WeasylRenderer.
MISAKA_FORMAT = (
    misaka.HTML_HARD_WRAP)
def strip_outer_tag(html):
    """Split an HTML string into (opening tag, inner content, closing tag).

    Assumes *html* starts with a start tag and ends with its closing tag;
    uses the HTML parser's start-tag regex to find where the opening tag
    ends and rindex('<') to find where the closing tag begins.
    """
    open_end = locatestarttagend.match(html).end()
    close_start = html.rindex(u'<')
    opening = html[:open_end + 1]
    inner = html[open_end + 1:close_start]
    closing = html[close_start:]
    return opening, inner, closing
class WeasylRenderer(misaka.HtmlRenderer):
    """Misaka HTML renderer with Weasyl-specific behavior."""

    def block_html(self, raw_html):
        # Render Markdown found *inside* a raw HTML block; comments pass
        # through untouched.
        if raw_html.startswith('<!--'):
            return raw_html
        opening, inner, closing = strip_outer_tag(raw_html)
        rendered = _markdown(inner).rstrip()
        return u''.join([opening, rendered, closing])

    def list(self, text, ordered, prefix):
        # Respect the starting number of ordered lists.
        if not prefix:
            return '<ul>{text}</ul>'.format(text=text)
        return '<ol start="{start}">{text}</ol>'.format(start=prefix, text=text)
def _markdown(target):
    """Render markdown *target* to HTML using Weasyl's renderer and
    extension set."""
    engine = misaka.Markdown(WeasylRenderer(MISAKA_FORMAT), MISAKA_EXT)
    return engine.render(target)
def create_link(t, username):
    """Build an <a> element linking to *username*'s profile page.

    *t* selects the flavor: "~" produces a plain text link, "!" an avatar
    icon only, and "!~" an avatar icon followed by the username.
    """
    link = etree.Element(u"a")
    link.set(u"href", u"/~" + login_name(username))

    if t == "~":
        link.text = username
        return link

    # Icon variants carry the user's avatar image.
    link.set(u"class", u"user-icon")
    image = etree.SubElement(link, u"img")
    image.set(u"src", u"/~{username}/avatar".format(username=login_name(username)))
    image.set(u"alt", username)

    if t != "!":
        # "!~": show the username as a label next to the icon.
        image.tail = u" "
        label = etree.SubElement(link, u"span")
        label.text = username

    return link
def add_user_links(fragment, parent, can_contain):
    """Replace <~user>/<!user>/<!~user> markers in element text with links.

    Recursively walks *fragment* in place.  *parent* is fragment's parent
    element (None for the root call); *can_contain* says whether
    fragment's own text may contain user links (False inside <a>, <pre>,
    and <code>, where links must not be injected).
    """
    # This file supports Python 2 (see the compat import at the top), so
    # a mutable dict stands in for `nonlocal` state shared with the
    # nested callbacks below.
    _nonlocal = {}

    def add_matches(text, got_link):
        # Scan `text` one USER_LINK token at a time: escapes and plain
        # characters are buffered in previous_text; markers fire got_link.
        for m in USER_LINK.finditer(text):
            escaped, t, username = m.group("escaped", "type", "username")

            if escaped:
                previous_text.append(escaped)
                continue

            if not t:
                previous_text.append(m.group())
                continue

            got_link(t, username)

    def got_text_link(t, username):
        # Marker found in fragment.text: flush buffered text (to
        # fragment.text or to the previous inserted link's tail), then
        # insert the new <a> as a child of fragment.
        previous = _nonlocal["previous"]
        if previous is None:
            fragment.text = "".join(previous_text)
        else:
            previous.tail = "".join(previous_text)
        del previous_text[:]
        link = create_link(t, username)
        fragment.insert(_nonlocal["insert_index"], link)
        _nonlocal["insert_index"] += 1
        _nonlocal["previous"] = link

    def got_tail_link(t, username):
        # Marker found in fragment.tail: flush buffered text to the
        # previous node's tail, then insert the new <a> as a sibling
        # under parent.
        _nonlocal["previous"].tail = "".join(previous_text)
        del previous_text[:]
        _nonlocal["insert_index"] += 1
        link = create_link(t, username)
        parent.insert(_nonlocal["insert_index"], link)
        _nonlocal["previous"] = link

    if can_contain:
        # Recurse into children first; links never nest inside <a>, <pre>,
        # or <code>.
        for child in list(fragment):
            child_can_contain = child.tag not in ("a", "pre", "code")
            add_user_links(child, fragment, child_can_contain)

        if fragment.text:
            _nonlocal["previous"] = None
            _nonlocal["insert_index"] = 0
            previous_text = []
            add_matches(fragment.text, got_text_link)

            # Flush whatever text remained after the last marker.
            previous = _nonlocal["previous"]
            if previous is None:
                fragment.text = "".join(previous_text)
            else:
                previous.tail = "".join(previous_text)

    if fragment.tail:
        _nonlocal["previous"] = fragment
        _nonlocal["insert_index"] = list(parent).index(fragment)
        previous_text = []
        add_matches(fragment.tail, got_tail_link)
        _nonlocal["previous"].tail = "".join(previous_text)
def markdown(target, image=False):
    """Render Weasyl-flavored markdown *target* to an HTML string.

    *image* controls the inline-image budget: False/0 allows none, an int
    allows that many, any other truthy value allows 5.  Images over the
    budget are demoted to plain links; ``user:`` images become avatar
    icons wrapped in profile links.  Returns the rendered inner HTML
    (the temporary wrapping <div> is sliced off at the end).
    """
    if target is None:
        return ""

    # Work out the inline-image budget from the `image` argument.
    if not image:
        images_left = 0
    elif type(image) is int:
        images_left = image
    else:
        images_left = 5

    rendered = _markdown(target)
    fragment = html.fragment_fromstring(rendered, create_parent=True)

    # Rewrite shorthand link schemes (user:, da:, ib:, fa:, sf:) to the
    # corresponding profile URLs, using each site's username normalizer.
    for link in fragment.findall(".//a"):
        href = link.attrib.get("href")
        if href:
            t, _, user = href.partition(":")
            if t == "user":
                link.attrib["href"] = u"/~{user}".format(user=login_name(user))
            elif t == "da":
                link.attrib["href"] = u"https://{user}.deviantart.com/".format(user=_deviantart(user))
            elif t == "ib":
                link.attrib["href"] = u"https://inkbunny.net/{user}".format(user=_inkbunny(user))
            elif t == "fa":
                link.attrib["href"] = u"https://www.furaffinity.net/user/{user}".format(user=_furaffinity(user))
            elif t == "sf":
                link.attrib["href"] = u"https://{user}.sofurry.com/".format(user=_sofurry(user))
            else:
                continue

            # If the link had no display text (or showed the raw href),
            # show the bare username instead.
            if not link.text or link.text == href:
                link.text = user

    for parent in fragment.findall(".//*[img]"):
        # NOTE(review): this loop variable shadows the `image` parameter,
        # which was already consumed above.
        for image in list(parent):
            if image.tag != "img":
                continue

            src = image.get("src")
            if src:
                t, _, user = src.partition(":")
                if t != "user":
                    if images_left:
                        images_left -= 1
                    else:
                        # Over budget: replace the image with a plain link
                        # whose text is the alt text (or the URL).
                        i = list(parent).index(image)
                        link = etree.Element(u"a")
                        link.tail = image.tail
                        src = image.get("src")
                        if src:
                            link.set(u"href", src)
                        link.text = image.attrib.get("alt", src)
                        parent[i] = link
                    continue

                # user: images become avatar icons wrapped in a profile link.
                image.set(u"src", u"/~{user}/avatar".format(user=login_name(user)))
                link = etree.Element(u"a")
                link.set(u"href", u"/~{user}".format(user=login_name(user)))
                link.set(u"class", u"user-icon")
                parent.insert(list(parent).index(image), link)
                parent.remove(image)
                link.append(image)
                link.tail = image.tail
                if "alt" in image.attrib and image.attrib["alt"]:
                    # Show the alt text as a label beside the icon.
                    image.tail = u" "
                    label = etree.SubElement(link, u"span")
                    label.text = image.attrib["alt"]
                    del image.attrib["alt"]
                else:
                    image.tail = None
                    image.set(u"alt", user)

    add_user_links(fragment, None, True)
    defang(fragment)
    return html.tostring(fragment, encoding=unicode)[5:-6]  # <div>...</div>
def markdown_link(title, url):
    """Format a markdown link, escaping square brackets in the title."""
    escaped = title.replace('[', '\\[').replace(']', '\\]')
    return '[{0}]({1})'.format(escaped, url)
|
ChuanleiGuo/AlgorithmsPlayground | refs/heads/master | LeetCodeSolutions/python/101_Symmetric_Tree.py | 1 | class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
    def isSymmetric(self, root):
        """
        :type root: TreeNode
        :rtype: bool

        Iteratively compares mirror-image node pairs with an explicit
        stack instead of recursing.
        """
        if root is None:
            return True
        pending = [(root.left, root.right)]
        while pending:
            a, b = pending.pop()
            if a is None and b is None:
                continue
            if a is None or b is None or a.val != b.val:
                return False
            # Outer pair and inner pair must each mirror.
            pending.append((a.left, b.right))
            pending.append((a.right, b.left))
        return True
|
leonardbj/AIMS | refs/heads/master | src/exec/sp_lab.py | 1 | """ Description here
Author: Leonard Berrada
Date: 2 Nov 2015
"""
import sys
sys.path.append("../")
try:
    # Optional: nicer plot styling when seaborn is available.
    import seaborn as sns
    sns.set(color_codes=True)
except:
    # NOTE(review): bare except silently ignores any seaborn failure.
    pass
from process_data import data_from_file
from Regression import AutoRegressive, AutoCorrelation

# Candidate data files; fXSamples.mat is the one currently selected.
# file_name = "finPredProb.mat"
# file_name = "co2.mat"
# file_name = "sunspots.mat"
# file_name = "mg.mat"
file_name = "fXSamples.mat"

ix = 1  # which series/column to load from the data file
p = 5   # model order (number of lags)

args = data_from_file(file_name,
                      ix=ix)

# Fit an autoregressive model of order p and generate predictions.
my_ar = AutoRegressive(*args, p=p)
my_ar.fit()
my_ar.predict()
# my_ar.plot_var('ypred')

# Fit the autocorrelation model, predict, and plot its spectrum.
my_ac = AutoCorrelation(*args, p=p)
my_ac.fit()
my_ac.predict()
# my_ac.plot_var('ypred', show=True)
my_ac.spectrum()
my_ac.plot_attr('spectrum', show=True)
|
gyroninja/tnoodle | refs/heads/master | svglite/tmtproject.py | 4 | import tmt
# Human-readable summary passed to the project registration below.
DESCRIPTION = "A dead simple svg generation library written in pure Java, with no dependencies. This code runs on both desktop Java, Android, and compiles to Javascript with GWT."
# Register this directory as an Eclipse project named after its folder.
tmt.EclipseProject(tmt.projectName(), description=DESCRIPTION)
|
fheinle/Photoblog | refs/heads/master | gdata/tlslite/BaseDB.py | 238 | """Base class for SharedKeyDB and VerifierDB."""
import anydbm
import thread
class BaseDB:
    """Dictionary-like store backed by an anydbm file or an in-memory dict.

    Subclasses supply _getItem/_setItem/_checkItem to (de)serialize values.
    All access is serialized through a single lock.
    """
    def __init__(self, filename, type):
        # filename: path of the on-disk database, or falsy for in-memory.
        # type: tag stored under "--Reserved--type" to identify the DB kind.
        self.type = type
        self.filename = filename
        if self.filename:
            # On-disk DB must be explicitly create()d or open()ed first.
            self.db = None
        else:
            self.db = {}
        self.lock = thread.allocate_lock()

    def create(self):
        """Create a new on-disk database.

        @raise anydbm.error: If there's a problem creating the database.
        """
        if self.filename:
            self.db = anydbm.open(self.filename, "n") #raises anydbm.error
            self.db["--Reserved--type"] = self.type
            self.db.sync()
        else:
            self.db = {}

    def open(self):
        """Open a pre-existing on-disk database.

        @raise anydbm.error: If there's a problem opening the database.
        @raise ValueError: If the database is not of the right type.
        """
        if not self.filename:
            raise ValueError("Can only open on-disk databases")
        self.db = anydbm.open(self.filename, "w") #raises anydbm.error
        try:
            # Verify the type tag written by create().
            if self.db["--Reserved--type"] != self.type:
                raise ValueError("Not a %s database" % self.type)
        except KeyError:
            raise ValueError("Not a recognized database")

    def __getitem__(self, username):
        # Fetch and deserialize the stored value for `username`.
        if self.db == None:
            raise AssertionError("DB not open")

        self.lock.acquire()
        try:
            valueStr = self.db[username]
        finally:
            self.lock.release()

        # Deserialization happens outside the lock.
        return self._getItem(username, valueStr)

    def __setitem__(self, username, value):
        # Serialize and store `value` under `username`.
        if self.db == None:
            raise AssertionError("DB not open")

        valueStr = self._setItem(username, value)

        self.lock.acquire()
        try:
            self.db[username] = valueStr
            if self.filename:
                self.db.sync()
        finally:
            self.lock.release()

    def __delitem__(self, username):
        # Remove `username`'s entry, syncing on-disk databases.
        if self.db == None:
            raise AssertionError("DB not open")

        self.lock.acquire()
        try:
            del(self.db[username])
            if self.filename:
                self.db.sync()
        finally:
            self.lock.release()

    def __contains__(self, username):
        """Check if the database contains the specified username.

        @type username: str
        @param username: The username to check for.

        @rtype: bool
        @return: True if the database contains the username, False
        otherwise.
        """
        if self.db == None:
            raise AssertionError("DB not open")

        self.lock.acquire()
        try:
            return self.db.has_key(username)
        finally:
            self.lock.release()

    def check(self, username, param):
        # Look up `username` and let the subclass validate `param` against it.
        value = self.__getitem__(username)
        return self._checkItem(value, username, param)

    def keys(self):
        """Return a list of usernames in the database.

        @rtype: list
        @return: The usernames in the database.
        """
        if self.db == None:
            raise AssertionError("DB not open")

        self.lock.acquire()
        try:
            usernames = self.db.keys()
        finally:
            self.lock.release()

        # Hide internal bookkeeping entries such as "--Reserved--type".
        usernames = [u for u in usernames if not u.startswith("--Reserved--")]
        return usernames |
jjmiranda/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/tests/test_capa_module.py | 3 | # -*- coding: utf-8 -*-
"""
Tests of the Capa XModule
"""
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
import datetime
import json
import random
import os
import textwrap
import unittest
import ddt
from lxml import etree
from mock import Mock, patch, DEFAULT
import webob
from webob.multidict import MultiDict
import xmodule
from xmodule.tests import DATA_DIR
from capa import responsetypes
from capa.responsetypes import (StudentInputError, LoncapaProblemError,
ResponseError)
from capa.xqueue_interface import XQueueInterface
from xmodule.capa_module import CapaModule, CapaDescriptor, ComplexEncoder
from opaque_keys.edx.locations import Location
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from . import get_test_system
from pytz import UTC
from capa.correctmap import CorrectMap
from ..capa_base_constants import RANDOMIZATION
class CapaFactory(object):
    """
    A helper class to create problem modules with various parameters for testing.
    """

    sample_problem_xml = textwrap.dedent("""\
        <?xml version="1.0"?>
        <problem>
            <text>
                <p>What is pi, to two decimal places?</p>
            </text>
        <numericalresponse answer="3.14">
        <textline math="1" size="30"/>
        </numericalresponse>
        </problem>
    """)

    # Shared counter so every created problem gets a unique location name.
    num = 0

    @classmethod
    def next_num(cls):
        # Advance and return the shared problem counter.
        cls.num += 1
        return cls.num

    @classmethod
    def input_key(cls, response_num=2, input_num=1):
        """
        Return the input key to use when passing GET parameters
        """
        return "input_" + cls.answer_key(response_num, input_num)

    @classmethod
    def answer_key(cls, response_num=2, input_num=1):
        """
        Return the key stored in the capa problem answer dict
        """
        # Mirrors the i4x location string of the most recently created problem.
        return (
            "%s_%d_%d" % (
                "-".join(['i4x', 'edX', 'capa_test', 'problem', 'SampleProblem%d' % cls.num]),
                response_num,
                input_num
            )
        )

    @classmethod
    def create(cls,
               attempts=None,
               problem_state=None,
               correct=False,
               xml=None,
               override_get_score=True,
               **kwargs
               ):
        """
        All parameters are optional, and are added to the created problem if specified.

        Arguments:
            graceperiod:
            due:
            max_attempts:
            showanswer:
            force_save_button:
            rerandomize: all strings, as specified in the policy for the problem

            problem_state: a dict to to be serialized into the instance_state of the
                module.

            attempts: also added to instance state.  Will be converted to an int.

            correct: if True (and override_get_score is set), the module reports
                full credit; otherwise zero.
            xml: problem XML; defaults to sample_problem_xml.
            override_get_score: replace get_score with a stub returning the
                score implied by `correct`.
        """
        location = Location(
            "edX",
            "capa_test",
            "2012_Fall",
            "problem",
            "SampleProblem{0}".format(cls.next_num()),
            None
        )
        if xml is None:
            xml = cls.sample_problem_xml
        field_data = {'data': xml}
        # Extra keyword arguments become XBlock fields (graceperiod, due, ...).
        field_data.update(kwargs)
        descriptor = Mock(weight="1")
        if problem_state is not None:
            field_data.update(problem_state)
        if attempts is not None:
            # converting to int here because I keep putting "0" and "1" in the tests
            # since everything else is a string.
            field_data['attempts'] = int(attempts)

        system = get_test_system()
        system.render_template = Mock(return_value="<div>Test Template HTML</div>")
        module = CapaModule(
            descriptor,
            system,
            DictFieldData(field_data),
            ScopeIds(None, None, location, location),
        )

        if override_get_score:
            if correct:
                # TODO: probably better to actually set the internal state properly, but...
                module.get_score = lambda: {'score': 1, 'total': 1}
            else:
                module.get_score = lambda: {'score': 0, 'total': 1}

        return module
class CapaFactoryWithFiles(CapaFactory):
    """
    A factory for creating a Capa problem with files attached.

    The problem pairs a coderesponse (file submission graded via the external
    xqueue) with a customresponse asking for a partner's username.
    """
    sample_problem_xml = textwrap.dedent("""\
        <problem>
        <coderesponse queuename="BerkeleyX-cs188x">
        <!-- actual filenames here don't matter for server-side tests,
        they are only acted upon in the browser. -->
        <filesubmission
        points="25"
        allowed_files="prog1.py prog2.py prog3.py"
        required_files="prog1.py prog2.py prog3.py"
        />
        <codeparam>
        <answer_display>
        If you're having trouble with this Project,
        please refer to the Lecture Slides and attend office hours.
        </answer_display>
        <grader_payload>{"project": "p3"}</grader_payload>
        </codeparam>
        </coderesponse>
        <customresponse>
        <text>
        If you worked with a partner, enter their username or email address. If you
        worked alone, enter None.
        </text>
        <textline points="0" size="40" correct_answer="Your partner's username or 'None'"/>
        <answer type="loncapa/python">
        correct=['correct']
        s = str(submission[0]).strip()
        if submission[0] == '':
            correct[0] = 'incorrect'
        </answer>
        </customresponse>
        </problem>
        """)
@ddt.ddt
class CapaModuleTest(unittest.TestCase):
def setUp(self):
    """Precompute date strings relative to now for the due-date tests."""
    super(CapaModuleTest, self).setUp()

    now = datetime.datetime.now(UTC)
    one_day = datetime.timedelta(days=1)
    self.yesterday_str = str(now - one_day)
    self.today_str = str(now)
    self.tomorrow_str = str(now + one_day)

    # Grace period uses capa's own duration syntax, not a timedelta repr.
    self.two_day_delta_str = "2 days"
def test_import(self):
    """
    Two factory-created modules both start ungraded and receive distinct
    url_names.
    """
    module = CapaFactory.create()
    self.assertEqual(module.get_score()['score'], 0)

    other_module = CapaFactory.create()
    # Bug fix: this assertion previously re-checked `module` instead of
    # `other_module`, so the second module's score was never verified.
    self.assertEqual(other_module.get_score()['score'], 0)
    self.assertNotEqual(module.url_name, other_module.url_name,
                        "Factory should be creating unique names for each problem")
def test_correct(self):
    """
    Check that the factory creates correct and incorrect problems properly.
    """
    incorrect_module = CapaFactory.create()
    correct_module = CapaFactory.create(correct=True)
    self.assertEqual(incorrect_module.get_score()['score'], 0)
    self.assertEqual(correct_module.get_score()['score'], 1)
def test_get_score(self):
    """
    Exercise get_score() against a real CorrectMap instead of the factory's
    stubbed version.

    @jbau Note: this obviously depends on a particular implementation of
    get_score, but it is useful as unit-level coverage since LoncapaProblem
    is not tested directly elsewhere.
    """
    # Fix: removed a local `from capa.correctmap import CorrectMap` that
    # duplicated the module-level import of the same name.
    student_answers = {'1_2_1': 'abcd'}

    correct_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=0.9)
    module = CapaFactory.create(correct=True, override_get_score=False)
    module.lcp.correct_map = correct_map
    module.lcp.student_answers = student_answers
    self.assertEqual(module.get_score()['score'], 0.9)

    other_correct_map = CorrectMap(answer_id='1_2_1', correctness="incorrect", npoints=0.1)
    other_module = CapaFactory.create(correct=False, override_get_score=False)
    other_module.lcp.correct_map = other_correct_map
    other_module.lcp.student_answers = student_answers
    self.assertEqual(other_module.get_score()['score'], 0.1)
def test_showanswer_default(self):
    """
    With the default showanswer ('closed') and no due date, the problem is
    still open, so the answer must not be available.
    """
    problem = CapaFactory.create()
    self.assertFalse(problem.answer_available())
def test_showanswer_attempted(self):
    """The answer becomes visible only after at least one attempt."""
    problem = CapaFactory.create(showanswer='attempted')
    self.assertFalse(problem.answer_available())
    problem.attempts = 1
    self.assertTrue(problem.answer_available())
def test_showanswer_closed(self):
    """showanswer='closed': answer visible only once the problem is closed."""
    # Visible once attempts are exhausted, even before the due date.
    exhausted = CapaFactory.create(showanswer='closed',
                                   max_attempts="1",
                                   attempts="1",
                                   due=self.tomorrow_str)
    self.assertTrue(exhausted.answer_available())

    # Visible once the due date has passed.
    overdue = CapaFactory.create(showanswer='closed',
                                 max_attempts="1",
                                 attempts="0",
                                 due=self.yesterday_str)
    self.assertTrue(overdue.answer_available())

    # Hidden while attempts remain and the due date is in the future.
    open_problem = CapaFactory.create(showanswer='closed',
                                      max_attempts="1",
                                      attempts="0",
                                      due=self.tomorrow_str)
    self.assertFalse(open_problem.answer_available())

    # Hidden while the grace period is still in effect.
    in_grace = CapaFactory.create(showanswer='closed',
                                  max_attempts="1",
                                  attempts="0",
                                  due=self.yesterday_str,
                                  graceperiod=self.two_day_delta_str)
    self.assertFalse(in_grace.answer_available())
def test_showanswer_correct_or_past_due(self):
    """
    showanswer='correct_or_past_due': the answer is shown once the
    submission is correct, or once the problem is closed for everyone
    (due date + grace period).
    """
    # Correct answer makes it visible even before the due date.
    correct_early = CapaFactory.create(showanswer='correct_or_past_due',
                                       max_attempts="1",
                                       attempts="0",
                                       due=self.tomorrow_str,
                                       correct=True)
    self.assertTrue(correct_early.answer_available())

    # Past the due date: visible even when the answer is wrong...
    overdue_wrong = CapaFactory.create(showanswer='correct_or_past_due',
                                       max_attempts="1",
                                       attempts="0",
                                       due=self.yesterday_str)
    self.assertTrue(overdue_wrong.answer_available())

    # ...and also, of course, when it is right.
    overdue_right = CapaFactory.create(showanswer='correct_or_past_due',
                                       max_attempts="1",
                                       attempts="0",
                                       due=self.yesterday_str,
                                       correct=True)
    self.assertTrue(overdue_right.answer_available())

    # Hidden while the grace period lasts and the answer is wrong.
    in_grace = CapaFactory.create(showanswer='correct_or_past_due',
                                  max_attempts="1",
                                  attempts="1",
                                  due=self.yesterday_str,
                                  graceperiod=self.two_day_delta_str)
    self.assertFalse(in_grace.answer_available())
def test_showanswer_past_due(self):
    """
    showanswer='past_due': the answer is shown only after the problem is
    closed for everyone (due date + grace period), never merely because a
    student exhausted their attempts.
    """
    exhausted = CapaFactory.create(showanswer='past_due',
                                   max_attempts="1",
                                   attempts="1",
                                   due=self.tomorrow_str)
    self.assertFalse(exhausted.answer_available())

    overdue = CapaFactory.create(showanswer='past_due',
                                 max_attempts="1",
                                 attempts="0",
                                 due=self.yesterday_str)
    self.assertTrue(overdue.answer_available())

    open_problem = CapaFactory.create(showanswer='past_due',
                                      max_attempts="1",
                                      attempts="0",
                                      due=self.tomorrow_str)
    self.assertFalse(open_problem.answer_available())

    # Still hidden during the grace period, even with no attempts left.
    in_grace = CapaFactory.create(showanswer='past_due',
                                  max_attempts="1",
                                  attempts="1",
                                  due=self.yesterday_str,
                                  graceperiod=self.two_day_delta_str)
    self.assertFalse(in_grace.answer_available())
def test_showanswer_finished(self):
    """
    showanswer='finished': the answer is shown once the problem is closed
    OR once the submission is correct.
    """
    exhausted = CapaFactory.create(showanswer='finished',
                                   max_attempts="1",
                                   attempts="1",
                                   due=self.tomorrow_str)
    self.assertTrue(exhausted.answer_available())

    overdue = CapaFactory.create(showanswer='finished',
                                 max_attempts="1",
                                 attempts="0",
                                 due=self.yesterday_str)
    self.assertTrue(overdue.answer_available())

    # Hidden: attempts remain and the answer is wrong.
    open_wrong = CapaFactory.create(showanswer='finished',
                                    max_attempts="1",
                                    attempts="0",
                                    due=self.tomorrow_str)
    self.assertFalse(open_wrong.answer_available())

    # Visible: attempts remain but the answer is right.
    open_right = CapaFactory.create(showanswer='finished',
                                    max_attempts="1",
                                    attempts="0",
                                    due=self.tomorrow_str,
                                    correct=True)
    self.assertTrue(open_right.answer_available())

    # Visible during the grace period because attempts are exhausted.
    in_grace = CapaFactory.create(showanswer='finished',
                                  max_attempts="1",
                                  attempts="1",
                                  due=self.yesterday_str,
                                  graceperiod=self.two_day_delta_str)
    self.assertTrue(in_grace.answer_available())
def test_closed(self):
    """closed() is true once attempts are exhausted or the due date passed."""
    # Attempts remaining --> open
    self.assertFalse(CapaFactory.create(max_attempts="1", attempts="0").closed())
    self.assertFalse(CapaFactory.create(max_attempts="2", attempts="1").closed())

    # Attempts used up (or over-used) --> closed
    self.assertTrue(CapaFactory.create(max_attempts="1", attempts="1").closed())
    self.assertTrue(CapaFactory.create(max_attempts="1", attempts="2").closed())

    # Survey-style problem (max_attempts = 0) --> closed
    self.assertTrue(CapaFactory.create(max_attempts="0", attempts="2").closed())

    # Past due --> closed
    self.assertTrue(
        CapaFactory.create(max_attempts="1", attempts="0", due=self.yesterday_str).closed()
    )
def test_parse_get_params(self):
    """
    make_dict_of_responses() strips the "input_" prefix from GET keys,
    preserves values, turns "name[]" keys into lists, and raises
    ValueError on malformed or conflicting key names.
    """
    # Valid GET param dict
    # 'input_5' intentionally left unset,
    valid_get_dict = MultiDict({
        'input_1': 'test',
        'input_1_2': 'test',
        'input_1_2_3': 'test',
        'input_[]_3': 'test',
        'input_4': None,
        'input_6': 5
    })

    result = CapaModule.make_dict_of_responses(valid_get_dict)

    # Expect that we get a dict with "input" stripped from key names
    # and that we get the same values back
    for key in result.keys():
        original_key = "input_" + key
        self.assertIn(original_key, valid_get_dict, "Output dict should have key %s" % original_key)
        self.assertEqual(valid_get_dict[original_key], result[key])

    # Valid GET param dict with list keys
    # Each tuple represents a single parameter in the query string
    valid_get_dict = MultiDict((('input_2[]', 'test1'), ('input_2[]', 'test2')))
    result = CapaModule.make_dict_of_responses(valid_get_dict)
    self.assertIn('2', result)
    self.assertEqual(['test1', 'test2'], result['2'])

    # If we use [] at the end of a key name, we should always
    # get a list, even if there's just one value
    valid_get_dict = MultiDict({'input_1[]': 'test'})
    result = CapaModule.make_dict_of_responses(valid_get_dict)
    self.assertEqual(result['1'], ['test'])

    # If we have no underscores in the name, then the key is invalid
    invalid_get_dict = MultiDict({'input': 'test'})
    with self.assertRaises(ValueError):
        result = CapaModule.make_dict_of_responses(invalid_get_dict)

    # Two equivalent names (one list, one non-list)
    # One of the values would overwrite the other, so detect this
    # and raise an exception
    invalid_get_dict = MultiDict({'input_1[]': 'test 1',
                                  'input_1': 'test 2'})
    with self.assertRaises(ValueError):
        result = CapaModule.make_dict_of_responses(invalid_get_dict)
def test_check_problem_correct(self):
    """A correct submission reports 'correct' and bumps the attempt count."""
    module = CapaFactory.create(attempts=1)

    # Force every answer to grade as correct and stub out HTML rendering.
    with patch('capa.correctmap.CorrectMap.is_correct') as is_correct_patch, \
            patch('xmodule.capa_module.CapaModule.get_problem_html') as html_patch:
        is_correct_patch.return_value = True
        html_patch.return_value = "Test HTML"

        result = module.check_problem({CapaFactory.input_key(): '3.14'})

    self.assertEqual(result['success'], 'correct')
    # The (mocked) HTML comes back in 'contents'.
    self.assertEqual(result['contents'], 'Test HTML')
    # One more attempt was consumed.
    self.assertEqual(module.attempts, 2)
def test_check_problem_incorrect(self):
    """An incorrect submission reports 'incorrect' and increments attempts."""
    module = CapaFactory.create(attempts=0)

    # Simulate marking the input incorrect
    with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct:
        mock_is_correct.return_value = False

        # Check the problem
        get_request_dict = {CapaFactory.input_key(): '0'}
        result = module.check_problem(get_request_dict)

    # Expect that the problem is marked incorrect
    # (the original comment here incorrectly said "correct")
    self.assertEqual(result['success'], 'incorrect')

    # Expect that the number of attempts is incremented by 1
    self.assertEqual(module.attempts, 1)
def test_check_problem_closed(self):
    """Submitting to a closed problem raises NotFoundError; attempts unchanged."""
    module = CapaFactory.create(attempts=3)

    # Force CapaModule.closed() to report the problem as closed.
    with patch('xmodule.capa_module.CapaModule.closed') as closed_patch:
        closed_patch.return_value = True
        with self.assertRaises(xmodule.exceptions.NotFoundError):
            module.check_problem({CapaFactory.input_key(): '3.14'})

    # The rejected submission must not consume an attempt.
    self.assertEqual(module.attempts, 3)
@ddt.data(
    RANDOMIZATION.ALWAYS,
    'true'
)
def test_check_problem_resubmitted_with_randomize(self, rerandomize):
    """With randomization on, resubmitting a finished problem is rejected."""
    module = CapaFactory.create(rerandomize=rerandomize, attempts=0)
    module.done = True

    with self.assertRaises(xmodule.exceptions.NotFoundError):
        module.check_problem({CapaFactory.input_key(): '3.14'})

    # The rejected submission must not consume an attempt.
    self.assertEqual(module.attempts, 0)
@ddt.data(
    RANDOMIZATION.NEVER,
    'false',
    RANDOMIZATION.PER_STUDENT
)
def test_check_problem_resubmitted_no_randomize(self, rerandomize):
    """Without randomization, a finished problem can be resubmitted."""
    module = CapaFactory.create(rerandomize=rerandomize, attempts=0, done=True)

    result = module.check_problem({CapaFactory.input_key(): '3.14'})
    self.assertEqual(result['success'], 'correct')

    # A successful resubmission consumes an attempt.
    self.assertEqual(module.attempts, 1)
def test_check_problem_queued(self):
    """A problem already waiting in the xqueue rejects new submissions."""
    module = CapaFactory.create(attempts=1)

    # Simulate that the problem is queued
    multipatch = patch.multiple(
        'capa.capa_problem.LoncapaProblem',
        is_queued=DEFAULT,
        get_recentmost_queuetime=DEFAULT
    )
    with multipatch as values:
        values['is_queued'].return_value = True
        values['get_recentmost_queuetime'].return_value = datetime.datetime.now(UTC)

        get_request_dict = {CapaFactory.input_key(): '3.14'}
        result = module.check_problem(get_request_dict)

        # Expect an AJAX alert message in 'success'
        self.assertIn('You must wait', result['success'])

    # Expect that the number of attempts is NOT incremented
    self.assertEqual(module.attempts, 1)
def test_check_problem_with_files(self):
    """Uploaded files are forwarded to the xqueue via the check_problem API."""
    # Check a problem with uploaded files, using the check_problem API.
    # pylint: disable=protected-access

    # The files we'll be uploading.
    fnames = ["prog1.py", "prog2.py", "prog3.py"]
    fpaths = [os.path.join(DATA_DIR, "capa", fname) for fname in fnames]
    fileobjs = [open(fpath) for fpath in fpaths]
    for fileobj in fileobjs:
        self.addCleanup(fileobj.close)

    module = CapaFactoryWithFiles.create()

    # Mock the XQueueInterface so nothing touches the network.
    xqueue_interface = XQueueInterface("http://example.com/xqueue", Mock())
    xqueue_interface._http_post = Mock(return_value=(0, "ok"))
    module.system.xqueue['interface'] = xqueue_interface

    # Create a request dictionary for check_problem.
    get_request_dict = {
        CapaFactoryWithFiles.input_key(response_num=2): fileobjs,
        CapaFactoryWithFiles.input_key(response_num=3): 'None',
    }

    module.check_problem(get_request_dict)

    # _http_post is called like this:
    #   _http_post(
    #       'http://example.com/xqueue/xqueue/submit/',
    #       {
    #           'xqueue_header': '{"lms_key": "...", "lms_callback_url": "/", "queue_name": "BerkeleyX-cs188x"}',
    #           'xqueue_body': '{"student_info": "...", "grader_payload": "{\\"project\\": \\"p3\\"}", "student_response": ""}',
    #       },
    #       files={
    #           path(u'.../prog1.py'): <open file u'.../prog1.py', mode 'r'>,
    #           ...one entry per uploaded file, keyed by full path...
    #       },
    #   )
    self.assertEqual(xqueue_interface._http_post.call_count, 1)
    _, kwargs = xqueue_interface._http_post.call_args
    # Every uploaded file appears exactly once, keyed by its full path.
    self.assertItemsEqual(fpaths, kwargs['files'].keys())
    for fpath, fileobj in kwargs['files'].iteritems():
        self.assertEqual(fpath, fileobj.name)
def test_check_problem_with_files_as_xblock(self):
    """Uploaded files also reach the xqueue via the XBlock handler API."""
    # Check a problem with uploaded files, using the XBlock API.
    # pylint: disable=protected-access

    # The files we'll be uploading.
    fnames = ["prog1.py", "prog2.py", "prog3.py"]
    fpaths = [os.path.join(DATA_DIR, "capa", fname) for fname in fnames]
    fileobjs = [open(fpath) for fpath in fpaths]
    for fileobj in fileobjs:
        self.addCleanup(fileobj.close)

    module = CapaFactoryWithFiles.create()

    # Mock the XQueueInterface.
    xqueue_interface = XQueueInterface("http://example.com/xqueue", Mock())
    xqueue_interface._http_post = Mock(return_value=(0, "ok"))
    module.system.xqueue['interface'] = xqueue_interface

    # Create a webob Request with the files uploaded.
    post_data = []
    for fname, fileobj in zip(fnames, fileobjs):
        post_data.append((CapaFactoryWithFiles.input_key(response_num=2), (fname, fileobj)))
    post_data.append((CapaFactoryWithFiles.input_key(response_num=3), 'None'))
    request = webob.Request.blank("/some/fake/url", POST=post_data, content_type='multipart/form-data')

    module.handle('xmodule_handler', request, 'problem_check')

    self.assertEqual(xqueue_interface._http_post.call_count, 1)
    _, kwargs = xqueue_interface._http_post.call_args
    # Via the XBlock path the files are keyed by bare filename, not full path.
    self.assertItemsEqual(fnames, kwargs['files'].keys())
    for fpath, fileobj in kwargs['files'].iteritems():
        self.assertEqual(fpath, fileobj.name)
def test_check_problem_error(self):
    """Each handled grading exception surfaces as an AJAX error message."""
    for exception_class in (StudentInputError, LoncapaProblemError, ResponseError):
        module = CapaFactory.create(attempts=1)
        # Non-staff users get the plain message (no traceback).
        module.system.user_is_staff = False

        # Simulate answering a problem that raises the exception.
        with patch('capa.capa_problem.LoncapaProblem.grade_answers') as grade_patch:
            grade_patch.side_effect = exception_class('test error')
            result = module.check_problem({CapaFactory.input_key(): '3.14'})

        self.assertEqual('Error: test error', result['success'])
        # The failed submission must not consume an attempt.
        self.assertEqual(module.attempts, 1)
def test_check_problem_other_errors(self):
    """
    Test that errors other than the expected kinds give an appropriate message.
    See also `test_check_problem_error` for the "expected kinds" of errors.
    """
    module = CapaFactory.create(attempts=1)
    module.system.user_is_staff = False
    # With DEBUG on, the raw message is included in the response.
    module.system.DEBUG = True

    with patch('capa.capa_problem.LoncapaProblem.grade_answers') as grade_patch:
        error_msg = u"Superterrible error happened: ☠"
        grade_patch.side_effect = Exception(error_msg)
        result = module.check_problem({CapaFactory.input_key(): '3.14'})

    self.assertIn(error_msg, result['success'])
def test_check_problem_zero_max_grade(self):
    """
    Test that a capa problem with a max grade of zero doesn't generate an error.
    """
    module = CapaFactory.create(attempts=1)
    # Force the problem score to a total of zero.
    module.lcp.get_score = lambda: {'score': 0, 'total': 0}
    # Checking must complete without raising.
    module.check_problem({CapaFactory.input_key(): '3.14'})
def test_check_problem_error_nonascii(self):
    """Handled grading errors containing non-ASCII text survive intact."""
    for exception_class in (StudentInputError, LoncapaProblemError, ResponseError):
        module = CapaFactory.create(attempts=1)
        module.system.user_is_staff = False

        with patch('capa.capa_problem.LoncapaProblem.grade_answers') as grade_patch:
            grade_patch.side_effect = exception_class(u"ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ")
            result = module.check_problem({CapaFactory.input_key(): '3.14'})

        self.assertEqual(u'Error: ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ', result['success'])
        # The failed submission must not consume an attempt.
        self.assertEqual(module.attempts, 1)
def test_check_problem_error_with_staff_user(self):
    """Staff users additionally see traceback information in the message."""
    for exception_class in (StudentInputError, LoncapaProblemError, ResponseError):
        module = CapaFactory.create(attempts=1)
        module.system.user_is_staff = True

        with patch('capa.capa_problem.LoncapaProblem.grade_answers') as grade_patch:
            grade_patch.side_effect = exception_class('test error')
            result = module.check_problem({CapaFactory.input_key(): '3.14'})

        self.assertIn('test error', result['success'])
        # Traceback information IS included for staff users.
        self.assertIn('Traceback', result['success'])
        # The failed submission must not consume an attempt.
        self.assertEqual(module.attempts, 1)
def test_reset_problem(self):
    """Resetting a finished problem succeeds and rebuilds the LCP."""
    module = CapaFactory.create(done=True)
    module.new_lcp = Mock(wraps=module.new_lcp)
    module.choose_new_seed = Mock(wraps=module.choose_new_seed)

    # Stub out HTML rendering.
    with patch('xmodule.capa_module.CapaModule.get_problem_html') as html_patch:
        html_patch.return_value = "<div>Test HTML</div>"
        result = module.reset_problem({})

    self.assertTrue('success' in result and result['success'])
    self.assertIn('html', result)
    self.assertEqual(result['html'], "<div>Test HTML</div>")
    # The problem was rebuilt from scratch (no saved state passed in).
    module.new_lcp.assert_called_once_with(None)
def test_reset_problem_closed(self):
    """A closed problem refuses to reset."""
    # pre-studio default randomization
    module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS)

    # Simulate that the problem is closed.
    with patch('xmodule.capa_module.CapaModule.closed') as closed_patch:
        closed_patch.return_value = True
        result = module.reset_problem({})

    self.assertTrue('success' in result and not result['success'])
def test_reset_problem_not_done(self):
    """A problem that was never submitted cannot be reset."""
    module = CapaFactory.create(done=False)
    result = module.reset_problem({})
    self.assertTrue('success' in result and not result['success'])
def test_rescore_problem_correct(self):
    """Rescoring a finished problem as correct reports 'correct'."""
    module = CapaFactory.create(attempts=1, done=True)

    # Patch evaluate_answers() so every answer grades as correct.
    with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as evaluate_patch:
        evaluate_patch.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
        result = module.rescore_problem()

    self.assertEqual(result['success'], 'correct')
    # Rescoring returns no HTML and does not consume an attempt.
    self.assertNotIn('contents', result)
    self.assertEqual(module.attempts, 1)
def test_rescore_problem_incorrect(self):
    """
    Rescoring reports 'incorrect'; attempts=0 also verifies that rescoring
    works after the attempt count has been reset.
    """
    module = CapaFactory.create(attempts=0, done=True)

    # Patch evaluate_answers() so every answer grades as incorrect.
    with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as evaluate_patch:
        evaluate_patch.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
        result = module.rescore_problem()

    self.assertEqual(result['success'], 'incorrect')
    # Rescoring does not consume an attempt.
    self.assertEqual(module.attempts, 0)
def test_rescore_problem_not_done(self):
    """Rescoring an unsubmitted problem raises NotFoundError."""
    module = CapaFactory.create(done=False)
    with self.assertRaises(xmodule.exceptions.NotFoundError):
        module.rescore_problem()
def test_rescore_problem_not_supported(self):
    """Rescoring raises NotImplementedError when the problem can't rescore."""
    module = CapaFactory.create(done=True)
    with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as supports_patch:
        supports_patch.return_value = False
        with self.assertRaises(NotImplementedError):
            module.rescore_problem()
def _rescore_problem_error_helper(self, exception_class):
    """Assert that rescoring surfaces `exception_class` as an AJAX error."""
    module = CapaFactory.create(attempts=1, done=True)

    # Simulate rescoring a problem that raises the exception.
    with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as rescore_patch:
        rescore_patch.side_effect = exception_class(u'test error \u03a9')
        result = module.rescore_problem()

    self.assertEqual(result['success'], u'Error: test error \u03a9')
    # The failed rescore must not consume an attempt.
    self.assertEqual(module.attempts, 1)
def test_rescore_problem_student_input_error(self):
    """StudentInputError during rescore is reported as an AJAX error."""
    self._rescore_problem_error_helper(StudentInputError)
def test_rescore_problem_problem_error(self):
    """LoncapaProblemError during rescore is reported as an AJAX error."""
    self._rescore_problem_error_helper(LoncapaProblemError)
def test_rescore_problem_response_error(self):
    """ResponseError during rescore is reported as an AJAX error."""
    self._rescore_problem_error_helper(ResponseError)
def test_save_problem(self):
    """Saving stores the student answers and reports success."""
    module = CapaFactory.create(done=False)

    result = module.save_problem({CapaFactory.input_key(): '3.14'})

    # The answer is recorded under the capa answer key.
    self.assertEqual(module.lcp.student_answers, {CapaFactory.answer_key(): '3.14'})
    self.assertTrue('success' in result and result['success'])
def test_save_problem_closed(self):
    """Saving fails once the problem is closed."""
    module = CapaFactory.create(done=False)

    # Simulate that the problem is closed.
    with patch('xmodule.capa_module.CapaModule.closed') as closed_patch:
        closed_patch.return_value = True
        result = module.save_problem({CapaFactory.input_key(): '3.14'})

    self.assertTrue('success' in result and not result['success'])
@ddt.data(
    RANDOMIZATION.ALWAYS,
    'true'
)
def test_save_problem_submitted_with_randomize(self, rerandomize):
    """
    With randomization on, a submitted problem cannot be saved.
    Capa XModule treats 'always' and 'true' equivalently.
    """
    module = CapaFactory.create(rerandomize=rerandomize, done=True)
    result = module.save_problem({CapaFactory.input_key(): '3.14'})
    self.assertTrue('success' in result and not result['success'])
@ddt.data(
    RANDOMIZATION.NEVER,
    'false',
    RANDOMIZATION.PER_STUDENT
)
def test_save_problem_submitted_no_randomize(self, rerandomize):
    """
    Without randomization, saving a submitted problem succeeds.
    Capa XModule treats 'false' and 'per_student' equivalently.
    """
    module = CapaFactory.create(rerandomize=rerandomize, done=True)
    result = module.save_problem({CapaFactory.input_key(): '3.14'})
    self.assertTrue('success' in result and result['success'])
def test_check_button_name(self):
    """
    The button reads "Final Check" on (or past) the last allowed attempt
    and "Check" otherwise, including when attempts are unlimited.
    """
    attempts = random.randint(1, 10)

    # At, or (just in case) past, the attempt limit: "Final Check".
    for used in (attempts - 1, attempts, attempts + 1):
        module = CapaFactory.create(attempts=used, max_attempts=attempts)
        self.assertEqual(module.check_button_name(), "Final Check")

    # Attempts to spare: plain "Check".
    for used in (attempts - 2, attempts - 3):
        module = CapaFactory.create(attempts=used, max_attempts=attempts)
        self.assertEqual(module.check_button_name(), "Check")

    # No attempt limit: always "Check".
    for used in (attempts - 3, 0):
        module = CapaFactory.create(attempts=used)
        self.assertEqual(module.check_button_name(), "Check")
def test_check_button_checking_name(self):
    """While grading, the button reads "Checking..." at any attempt count."""
    for used in (1, 10):
        module = CapaFactory.create(attempts=used, max_attempts=10)
        self.assertEqual(module.check_button_checking_name(), "Checking...")
def test_check_button_name_customization(self):
    """text_customization overrides both check-button labels."""
    module = CapaFactory.create(
        attempts=1,
        max_attempts=10,
        text_customization={"custom_check": "Submit", "custom_final_check": "Final Submit"},
    )
    self.assertEqual(module.check_button_name(), "Submit")

    # On the final attempt the "final" label applies instead.
    module = CapaFactory.create(
        attempts=9,
        max_attempts=10,
        text_customization={"custom_check": "Submit", "custom_final_check": "Final Submit"},
    )
    self.assertEqual(module.check_button_name(), "Final Submit")
def test_check_button_checking_name_customization(self):
    """A custom "checking" label applies at any attempt count."""
    for used in (1, 9):
        module = CapaFactory.create(
            attempts=used,
            max_attempts=10,
            text_customization={
                "custom_check": "Submit",
                "custom_final_check": "Final Submit",
                "custom_checking": "Checking..."
            }
        )
        self.assertEqual(module.check_button_checking_name(), "Checking...")
def test_should_show_check_button(self):
    """The check button hides whenever submission is impossible."""
    attempts = random.randint(1, 10)

    # Past the deadline: hidden.
    self.assertFalse(CapaFactory.create(due=self.yesterday_str).should_show_check_button())

    # Out of attempts: hidden.
    self.assertFalse(
        CapaFactory.create(attempts=attempts, max_attempts=attempts).should_show_check_button()
    )

    # Survey question (max_attempts = 0): hidden.
    self.assertFalse(CapaFactory.create(max_attempts=0).should_show_check_button())

    # Submitted but not reset: hidden.  A reset is only possible when
    # rerandomize is "always"/"true".
    self.assertFalse(
        CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, done=True).should_show_check_button()
    )
    self.assertFalse(
        CapaFactory.create(rerandomize="true", done=True).should_show_check_button()
    )

    # Default case: shown.
    self.assertTrue(CapaFactory.create().should_show_check_button())

    # Submitted, but with no reset button (rerandomize "never"/"false"/
    # "per_student"): still shown.
    for rerandomize in (RANDOMIZATION.NEVER, "false", RANDOMIZATION.PER_STUDENT):
        self.assertTrue(
            CapaFactory.create(rerandomize=rerandomize, done=True).should_show_check_button()
        )
def test_should_show_reset_button(self):
    """Exercise should_show_reset_button() across deadline/attempt/randomization states."""
    attempts = random.randint(1, 10)

    # If we're after the deadline, do NOT show the reset button
    module = CapaFactory.create(due=self.yesterday_str, done=True)
    self.assertFalse(module.should_show_reset_button())

    # If the user is out of attempts, do NOT show the reset button
    module = CapaFactory.create(attempts=attempts, max_attempts=attempts, done=True)
    self.assertFalse(module.should_show_reset_button())

    # pre studio default value, DO show the reset button
    module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, done=True)
    self.assertTrue(module.should_show_reset_button())

    # If survey question for capa (max_attempts = 0),
    # DO show the reset button
    module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, max_attempts=0, done=True)
    self.assertTrue(module.should_show_reset_button())

    # If the question is not correct
    # DO show the reset button
    module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, max_attempts=0, done=True, correct=False)
    self.assertTrue(module.should_show_reset_button())

    # If the question is correct and randomization is never
    # DO not show the reset button
    module = CapaFactory.create(rerandomize=RANDOMIZATION.NEVER, max_attempts=0, done=True, correct=True)
    self.assertFalse(module.should_show_reset_button())

    # If the question is correct and randomization is always
    # Show the reset button
    module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, max_attempts=0, done=True, correct=True)
    self.assertTrue(module.should_show_reset_button())

    # Don't show reset button if randomization is turned on and the question is not done
    module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, show_reset_button=False, done=False)
    self.assertFalse(module.should_show_reset_button())

    # Show reset button if randomization is turned on and the problem is done
    module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, show_reset_button=False, done=True)
    self.assertTrue(module.should_show_reset_button())
def test_should_show_save_button(self):
    """Exercise should_show_save_button() across deadline/attempt/randomization states."""
    attempts = random.randint(1, 10)

    # If we're after the deadline, do NOT show the save button
    module = CapaFactory.create(due=self.yesterday_str, done=True)
    self.assertFalse(module.should_show_save_button())

    # If the user is out of attempts, do NOT show the save button
    module = CapaFactory.create(attempts=attempts, max_attempts=attempts, done=True)
    self.assertFalse(module.should_show_save_button())

    # If user submitted a problem but hasn't reset, do NOT show the save button
    module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, done=True)
    self.assertFalse(module.should_show_save_button())

    module = CapaFactory.create(rerandomize="true", done=True)
    self.assertFalse(module.should_show_save_button())

    # If the user has unlimited attempts and we are not randomizing,
    # then do NOT show a save button
    # because they can keep using "Check"
    module = CapaFactory.create(max_attempts=None, rerandomize=RANDOMIZATION.NEVER, done=False)
    self.assertFalse(module.should_show_save_button())

    module = CapaFactory.create(max_attempts=None, rerandomize="false", done=True)
    self.assertFalse(module.should_show_save_button())

    module = CapaFactory.create(max_attempts=None, rerandomize=RANDOMIZATION.PER_STUDENT, done=True)
    self.assertFalse(module.should_show_save_button())

    # pre-studio default, DO show the save button
    module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, done=False)
    self.assertTrue(module.should_show_save_button())

    # If we're not randomizing and we have limited attempts, then we can save
    module = CapaFactory.create(rerandomize=RANDOMIZATION.NEVER, max_attempts=2, done=True)
    self.assertTrue(module.should_show_save_button())

    module = CapaFactory.create(rerandomize="false", max_attempts=2, done=True)
    self.assertTrue(module.should_show_save_button())

    module = CapaFactory.create(rerandomize=RANDOMIZATION.PER_STUDENT, max_attempts=2, done=True)
    self.assertTrue(module.should_show_save_button())

    # If survey question for capa (max_attempts = 0),
    # DO show the save button
    module = CapaFactory.create(max_attempts=0, done=False)
    self.assertTrue(module.should_show_save_button())
def test_should_show_save_button_force_save_button(self):
    """force_save_button overrides the reset requirement, but not deadline/attempt limits."""
    # If we're after the deadline, do NOT show the save button
    # even though we're forcing a save
    module = CapaFactory.create(due=self.yesterday_str,
                                force_save_button="true",
                                done=True)
    self.assertFalse(module.should_show_save_button())

    # If the user is out of attempts, do NOT show the save button
    attempts = random.randint(1, 10)
    module = CapaFactory.create(attempts=attempts,
                                max_attempts=attempts,
                                force_save_button="true",
                                done=True)
    self.assertFalse(module.should_show_save_button())

    # Otherwise, if we force the save button,
    # then show it even if we would ordinarily
    # require a reset first
    module = CapaFactory.create(force_save_button="true",
                                rerandomize=RANDOMIZATION.ALWAYS,
                                done=True)
    self.assertTrue(module.should_show_save_button())

    module = CapaFactory.create(force_save_button="true",
                                rerandomize="true",
                                done=True)
    self.assertTrue(module.should_show_save_button())
def test_no_max_attempts(self):
    """Rendering a problem created with max_attempts='' must not raise."""
    module = CapaFactory.create(max_attempts='')
    # assert that we got here without exploding
    self.assertIsNotNone(module.get_problem_html())
def test_get_problem_html(self):
    """Check the template name, context, and encapsulation of the rendered problem HTML."""
    module = CapaFactory.create()

    # We've tested the show/hide button logic in other tests,
    # so here we hard-wire the values.
    # (getrandbits(1) is already 0 or 1 -- no need for the old `randint(0, 1) % 2`.)
    show_check_button = bool(random.getrandbits(1))
    show_reset_button = bool(random.getrandbits(1))
    show_save_button = bool(random.getrandbits(1))

    module.should_show_check_button = Mock(return_value=show_check_button)
    module.should_show_reset_button = Mock(return_value=show_reset_button)
    module.should_show_save_button = Mock(return_value=show_save_button)

    # Mock the system rendering function
    module.system.render_template = Mock(return_value="<div>Test Template HTML</div>")

    # Patch the capa problem's HTML rendering
    with patch('capa.capa_problem.LoncapaProblem.get_html') as mock_html:
        mock_html.return_value = "<div>Test Problem HTML</div>"

        # Render the problem HTML
        html = module.get_problem_html(encapsulate=False)

        # Also render the problem encapsulated in a <div>
        html_encapsulated = module.get_problem_html(encapsulate=True)

    # Expect that we get the rendered template back
    self.assertEqual(html, "<div>Test Template HTML</div>")

    # Check the rendering context
    render_args, _ = module.system.render_template.call_args
    self.assertEqual(len(render_args), 2)

    template_name = render_args[0]
    self.assertEqual(template_name, "problem.html")

    context = render_args[1]
    self.assertEqual(context['problem']['html'], "<div>Test Problem HTML</div>")
    self.assertEqual(bool(context['check_button']), show_check_button)
    self.assertEqual(bool(context['reset_button']), show_reset_button)
    self.assertEqual(bool(context['save_button']), show_save_button)

    # Assert that the encapsulated html contains the original html
    self.assertIn(html, html_encapsulated)
demand_xml = """
<problem>
<p>That is the question</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Alpha <choicehint>A hint</choicehint>
</choice>
<choice correct="true">Beta</choice>
</choicegroup>
</multiplechoiceresponse>
<demandhint>
<hint>Demand 1</hint>
<hint>Demand 2</hint>
</demandhint>
</problem>"""
def test_demand_hint(self):
    """Demand hints are exposed in the render context and cycled by get_demand_hint()."""
    # HTML generation is mocked out to be meaningless here, so instead we check
    # the context dict passed into HTML generation.
    module = CapaFactory.create(xml=self.demand_xml)
    module.get_problem_html()  # ignoring html result
    context = module.system.render_template.call_args[0][1]
    self.assertEqual(context['demand_hint_possible'], True)

    # Check the AJAX call that gets the hint by index
    result = module.get_demand_hint(0)
    self.assertEqual(result['contents'], u'Hint (1 of 2): Demand 1')
    self.assertEqual(result['hint_index'], 0)

    result = module.get_demand_hint(1)
    self.assertEqual(result['contents'], u'Hint (2 of 2): Demand 2')
    self.assertEqual(result['hint_index'], 1)

    result = module.get_demand_hint(2)  # here the server wraps around to index 0
    self.assertEqual(result['contents'], u'Hint (1 of 2): Demand 1')
    self.assertEqual(result['hint_index'], 0)
def test_demand_hint_logging(self):
    """Requesting a demand hint publishes a demandhint_displayed event with full details."""
    module = CapaFactory.create(xml=self.demand_xml)
    # Re-mock the module_id to a fixed string, so we can check the logging
    module.location = Mock(module.location)
    module.location.to_deprecated_string.return_value = 'i4x://edX/capa_test/problem/meh'

    with patch.object(module.runtime, 'publish') as mock_track_function:
        module.get_problem_html()
        module.get_demand_hint(0)
        mock_track_function.assert_called_with(
            module, 'edx.problem.hint.demandhint_displayed',
            {'hint_index': 0, 'module_id': u'i4x://edX/capa_test/problem/meh',
             'hint_text': 'Demand 1', 'hint_len': 2}
        )
def test_input_state_consistency(self):
    """input_state keys mirror lcp.inputs and are unique per module instance."""
    module1 = CapaFactory.create()
    module2 = CapaFactory.create()

    # check to make sure that the input_state and the keys have the same values
    module1.set_state_from_lcp()
    self.assertEqual(module1.lcp.inputs.keys(), module1.input_state.keys())

    # Two distinct modules must not share any input-state keys.
    module2.set_state_from_lcp()

    intersection = set(module2.input_state.keys()).intersection(set(module1.input_state.keys()))
    self.assertEqual(len(intersection), 0)
def test_get_problem_html_error(self):
    """
    In production, when an error occurs with the problem HTML
    rendering, a "dummy" problem is created with an error
    message to display to the user.
    """
    module = CapaFactory.create()

    # Save the original problem so we can compare it later
    original_problem = module.lcp

    # Simulate throwing an exception when the capa problem
    # is asked to render itself as HTML
    module.lcp.get_html = Mock(side_effect=Exception("Test"))

    # Stub out the get_test_system rendering function
    module.system.render_template = Mock(return_value="<div>Test Template HTML</div>")

    # Turn off DEBUG
    module.system.DEBUG = False

    # Try to render the module with DEBUG turned off
    html = module.get_problem_html()

    self.assertIsNotNone(html)

    # Check the rendering context
    render_args, _ = module.system.render_template.call_args
    context = render_args[1]
    self.assertIn("error", context['problem']['html'])

    # Expect that the module has created a new dummy problem with the error
    self.assertNotEqual(original_problem, module.lcp)
def test_get_problem_html_error_w_debug(self):
    """
    Test the html response when an error occurs with DEBUG on
    """
    module = CapaFactory.create()

    # Simulate throwing an exception when the capa problem
    # is asked to render itself as HTML
    # (non-ASCII message also exercises unicode handling in the error path)
    error_msg = u"Superterrible error happened: ☠"
    module.lcp.get_html = Mock(side_effect=Exception(error_msg))

    # Stub out the get_test_system rendering function
    module.system.render_template = Mock(return_value="<div>Test Template HTML</div>")

    # Make sure DEBUG is on
    module.system.DEBUG = True

    # Try to render the module with DEBUG turned on
    html = module.get_problem_html()

    self.assertIsNotNone(html)

    # Check the rendering context: with DEBUG on, the raw error message
    # is surfaced in the problem HTML.
    render_args, _ = module.system.render_template.call_args
    context = render_args[1]
    self.assertIn(error_msg, context['problem']['html'])
@ddt.data(
    'false',
    'true',
    RANDOMIZATION.NEVER,
    RANDOMIZATION.PER_STUDENT,
    RANDOMIZATION.ALWAYS,
    RANDOMIZATION.ONRESET
)
def test_random_seed_no_change(self, rerandomize):
    """The persisted seed is unchanged by check_problem() and save_problem()."""
    # Run the test for each possible rerandomize value
    module = CapaFactory.create(rerandomize=rerandomize)

    # Get the seed
    # By this point, the module should have persisted the seed
    seed = module.seed
    self.assertIsNotNone(seed)

    # If we're not rerandomizing, the seed is always set
    # to the same value (1)
    if rerandomize == RANDOMIZATION.NEVER:
        self.assertEqual(seed, 1,
                         msg="Seed should always be 1 when rerandomize='%s'" % rerandomize)

    # Check the problem
    get_request_dict = {CapaFactory.input_key(): '3.14'}
    module.check_problem(get_request_dict)

    # Expect that the seed is the same
    self.assertEqual(seed, module.seed)

    # Save the problem
    module.save_problem(get_request_dict)

    # Expect that the seed is the same
    self.assertEqual(seed, module.seed)
@ddt.data(
    'false',
    'true',
    RANDOMIZATION.NEVER,
    RANDOMIZATION.PER_STUDENT,
    RANDOMIZATION.ALWAYS,
    RANDOMIZATION.ONRESET
)
def test_random_seed_with_reset(self, rerandomize):
    """
    Run the test for each possible rerandomize value
    """

    def _reset_and_get_seed(module):
        """
        Reset the XModule and return the module's seed
        """

        # Simulate submitting an attempt
        # We need to do this, or reset_problem() will
        # fail because it won't re-randomize until the problem
        # has been submitted.
        module.done = True

        # Reset the problem
        module.reset_problem({})

        # Return the seed
        return module.seed

    def _retry_and_check(num_tries, test_func):
        '''
        Returns True if *test_func* was successful
        (returned True) within *num_tries* attempts

        *test_func* must be a function
        of the form test_func() -> bool
        '''
        success = False
        for __ in range(num_tries):
            if test_func() is True:
                success = True
                break
        return success

    module = CapaFactory.create(rerandomize=rerandomize, done=True)

    # Get the seed
    # By this point, the module should have persisted the seed
    seed = module.seed
    self.assertIsNotNone(seed)

    # We do NOT want the seed to reset if rerandomize
    # is set to 'never' -- it should still be 1
    # The seed also stays the same if we're randomizing
    # 'per_student': the same student should see the same problem
    if rerandomize in [RANDOMIZATION.NEVER,
                       'false',
                       RANDOMIZATION.PER_STUDENT]:
        self.assertEqual(seed, _reset_and_get_seed(module))

    # Otherwise, we expect the seed to change
    # to another valid seed
    else:

        # Since there's a small chance we might get the
        # same seed again, give it 5 chances
        # to generate a different seed
        success = _retry_and_check(5, lambda: _reset_and_get_seed(module) != seed)

        self.assertIsNotNone(module.seed)
        msg = 'Could not get a new seed from reset after 5 tries'
        self.assertTrue(success, msg)
@ddt.data(
    'false',
    'true',
    RANDOMIZATION.NEVER,
    RANDOMIZATION.PER_STUDENT,
    RANDOMIZATION.ALWAYS,
    RANDOMIZATION.ONRESET
)
def test_random_seed_with_reset_question_unsubmitted(self, rerandomize):
    """
    Run the test for each possible rerandomize value
    """

    def _reset_and_get_seed(module):
        """
        Reset the XModule and return the module's seed
        """

        # Reset the problem
        # By default, the problem is instantiated as unsubmitted
        module.reset_problem({})

        # Return the seed
        return module.seed

    module = CapaFactory.create(rerandomize=rerandomize, done=False)

    # Get the seed
    # By this point, the module should have persisted the seed
    seed = module.seed
    self.assertIsNotNone(seed)

    # the seed should never change because the student hasn't finished the problem
    self.assertEqual(seed, _reset_and_get_seed(module))
@ddt.data(
    RANDOMIZATION.ALWAYS,
    RANDOMIZATION.PER_STUDENT,
    'true',
    RANDOMIZATION.ONRESET
)
def test_random_seed_bins(self, rerandomize):
    """Seeds are binned: every freshly created module has a seed in [0, 1000)."""
    # Assert that we are limiting the number of possible seeds.
    # Get a bunch of seeds, they should all be in 0-999.
    # (A for-loop replaces the old manual `while i > 0 / i -= 1` countdown,
    # and assertTrue replaces a bare `assert`, which reports nothing useful
    # on failure and is stripped entirely under `python -O`.)
    for _ in range(200):
        module = CapaFactory.create(rerandomize=rerandomize)
        self.assertTrue(0 <= module.seed < 1000,
                        msg="seed %r out of range [0, 1000)" % module.seed)
@patch('xmodule.capa_base.log')
@patch('xmodule.capa_base.Progress')
def test_get_progress_error(self, mock_progress, mock_log):
    """
    Check that an exception given in `Progress` produces a `log.exception` call.
    """
    error_types = [TypeError, ValueError]
    for error_type in error_types:
        mock_progress.side_effect = error_type
        module = CapaFactory.create()
        # The error is swallowed (None returned) but logged.
        self.assertIsNone(module.get_progress())
        mock_log.exception.assert_called_once_with('Got bad progress')
        mock_log.reset_mock()
@patch('xmodule.capa_base.Progress')
def test_get_progress_no_error_if_weight_zero(self, mock_progress):
    """
    Check that if the weight is 0 get_progress does not try to create a Progress object.
    """
    mock_progress.return_value = True
    module = CapaFactory.create()
    module.weight = 0
    progress = module.get_progress()
    self.assertIsNone(progress)
    # Progress() must never have been constructed for weight 0.
    self.assertFalse(mock_progress.called)
@patch('xmodule.capa_base.Progress')
def test_get_progress_calculate_progress_fraction(self, mock_progress):
    """
    Check that score and total are calculated correctly for the progress fraction.
    """
    module = CapaFactory.create()
    module.weight = 1
    module.get_progress()
    # Unanswered problem: 0 out of 1.
    mock_progress.assert_called_with(0, 1)

    other_module = CapaFactory.create(correct=True)
    other_module.weight = 1
    other_module.get_progress()
    # Correctly answered problem: 1 out of 1.
    mock_progress.assert_called_with(1, 1)
def test_get_html(self):
    """
    Check that get_html() calls get_progress() with no arguments.
    """
    module = CapaFactory.create()
    # wraps= keeps the real behavior while recording the call
    module.get_progress = Mock(wraps=module.get_progress)
    module.get_html()
    module.get_progress.assert_called_once_with()
def test_get_problem(self):
    """
    Check that get_problem() returns the expected dictionary.

    The AJAX-facing get_problem() must wrap the unencapsulated problem
    HTML under the 'html' key.
    """
    module = CapaFactory.create()
    # assertEqual (not the deprecated assertEquals alias) for consistency
    # with the rest of this test class.
    self.assertEqual(module.get_problem("data"), {'html': module.get_problem_html(encapsulate=False)})
# Standard question with shuffle="true" used by a few tests
common_shuffle_xml = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct ="true">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
def test_check_unmask(self):
    """
    Check that shuffle unmasking is plumbed through: when check_problem is called,
    unmasked names should appear in the track_function event_info.
    """
    module = CapaFactory.create(xml=self.common_shuffle_xml)
    with patch.object(module.runtime, 'publish') as mock_track_function:
        get_request_dict = {CapaFactory.input_key(): 'choice_3'}  # the correct choice
        module.check_problem(get_request_dict)
        # mock_calls[1] is the problem_check event (index 0 is an earlier publish)
        mock_call = mock_track_function.mock_calls[1]
        event_info = mock_call[1][2]
        self.assertEqual(event_info['answers'][CapaFactory.answer_key()], 'choice_3')
        # 'permutation' key added to record how problem was shown
        self.assertEquals(event_info['permutation'][CapaFactory.answer_key()],
                          ('shuffle', ['choice_3', 'choice_1', 'choice_2', 'choice_0']))
        self.assertEquals(event_info['success'], 'correct')
@unittest.skip("masking temporarily disabled")
def test_save_unmask(self):
    """On problem save, unmasked data should appear on track_function."""
    module = CapaFactory.create(xml=self.common_shuffle_xml)
    with patch.object(module.runtime, 'track_function') as mock_track_function:
        get_request_dict = {CapaFactory.input_key(): 'mask_0'}
        module.save_problem(get_request_dict)
        mock_call = mock_track_function.mock_calls[0]
        event_info = mock_call[1][1]
        # 'mask_0' must have been translated back to the real choice name.
        self.assertEquals(event_info['answers'][CapaFactory.answer_key()], 'choice_2')
        self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
@unittest.skip("masking temporarily disabled")
def test_reset_unmask(self):
    """On problem reset, unmask names should appear track_function."""
    module = CapaFactory.create(xml=self.common_shuffle_xml)
    get_request_dict = {CapaFactory.input_key(): 'mask_0'}
    module.check_problem(get_request_dict)

    # On reset, 'old_state' should use unmasked names
    with patch.object(module.runtime, 'track_function') as mock_track_function:
        module.reset_problem(None)
        mock_call = mock_track_function.mock_calls[0]
        event_info = mock_call[1][1]
        self.assertEquals(mock_call[1][0], 'reset_problem')
        self.assertEquals(event_info['old_state']['student_answers'][CapaFactory.answer_key()], 'choice_2')
        self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
@unittest.skip("masking temporarily disabled")
def test_rescore_unmask(self):
    """On problem rescore, unmasked names should appear on track_function."""
    module = CapaFactory.create(xml=self.common_shuffle_xml)
    get_request_dict = {CapaFactory.input_key(): 'mask_0'}
    module.check_problem(get_request_dict)

    # On rescore, state/student_answers should use unmasked names
    with patch.object(module.runtime, 'track_function') as mock_track_function:
        module.rescore_problem()
        mock_call = mock_track_function.mock_calls[0]
        event_info = mock_call[1][1]
        self.assertEquals(mock_call[1][0], 'problem_rescore')
        self.assertEquals(event_info['state']['student_answers'][CapaFactory.answer_key()], 'choice_2')
        self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
def test_check_unmask_answerpool(self):
    """Check answer-pool question track_function uses unmasked names"""
    xml = textwrap.dedent("""
        <problem>
        <multiplechoiceresponse>
          <choicegroup type="MultipleChoice" answer-pool="4">
            <choice correct="false">Apple</choice>
            <choice correct="false">Banana</choice>
            <choice correct="false">Chocolate</choice>
            <choice correct ="true">Donut</choice>
          </choicegroup>
        </multiplechoiceresponse>
        </problem>
    """)
    module = CapaFactory.create(xml=xml)
    with patch.object(module.runtime, 'publish') as mock_track_function:
        get_request_dict = {CapaFactory.input_key(): 'choice_2'}  # mask_X form when masking enabled
        module.check_problem(get_request_dict)
        # mock_calls[1] is the problem_check event
        mock_call = mock_track_function.mock_calls[1]
        event_info = mock_call[1][2]
        self.assertEqual(event_info['answers'][CapaFactory.answer_key()], 'choice_2')
        # 'permutation' key added to record how problem was shown
        self.assertEquals(event_info['permutation'][CapaFactory.answer_key()],
                          ('answerpool', ['choice_1', 'choice_3', 'choice_2', 'choice_0']))
        self.assertEquals(event_info['success'], 'incorrect')
@ddt.unpack
@ddt.data(
    {'display_name': None, 'expected_display_name': 'problem'},
    {'display_name': '', 'expected_display_name': 'problem'},
    {'display_name': ' ', 'expected_display_name': 'problem'},
    {'display_name': 'CAPA 101', 'expected_display_name': 'CAPA 101'}
)
def test_problem_display_name_with_default(self, display_name, expected_display_name):
    """
    Verify that display_name_with_default works as expected.

    Empty/whitespace/None display names fall back to the default 'problem'.
    """
    module = CapaFactory.create(display_name=display_name)
    self.assertEqual(module.display_name_with_default, expected_display_name)
@ddt.data(
    '',
    '   ',
)
def test_problem_no_display_name(self, display_name):
    """
    Verify that if problem display name is not provided then a default name is used.
    """
    module = CapaFactory.create(display_name=display_name)
    module.get_problem_html()
    render_args, _ = module.system.render_template.call_args
    context = render_args[1]
    # The rendered context falls back to the block type as the name.
    self.assertEqual(context['problem']['name'], module.location.block_type)
@ddt.ddt
class CapaDescriptorTest(unittest.TestCase):
sample_checkbox_problem_xml = textwrap.dedent("""
<problem>
<p>Title</p>
<p>Description</p>
<p>Example</p>
<p>The following languages are in the Indo-European family:</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Urdu</choice>
<choice correct="false">Finnish</choice>
<choice correct="true">Marathi</choice>
<choice correct="true">French</choice>
<choice correct="false">Hungarian</choice>
</checkboxgroup>
</choiceresponse>
<p>Note: Make sure you select all of the correct options—there may be more than one!</p>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Solution for CAPA problem</p>
</div>
</solution>
</problem>
""")
sample_dropdown_problem_xml = textwrap.dedent("""
<problem>
<p>Dropdown problems allow learners to select only one option from a list of options.</p>
<p>Description</p>
<p>You can use the following example problem as a model.</p>
<p> Which of the following countries celebrates its independence on August 15?</p>
<optionresponse>
<optioninput options="('India','Spain','China','Bermuda')" correct="India"></optioninput>
</optionresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p> India became an independent nation on August 15, 1947.</p>
</div>
</solution>
</problem>
""")
sample_multichoice_problem_xml = textwrap.dedent("""
<problem>
<p>Multiple choice problems allow learners to select only one option.</p>
<p>When you add the problem, be sure to select Settings to specify a Display Name and other values.</p>
<p>You can use the following example problem as a model.</p>
<p>Which of the following countries has the largest population?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">Brazil
<choicehint>timely feedback -- explain why an almost correct answer is wrong</choicehint>
</choice>
<choice correct="false">Germany</choice>
<choice correct="true">Indonesia</choice>
<choice correct="false">Russia</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>According to September 2014 estimates:</p>
<p>The population of Indonesia is approximately 250 million.</p>
<p>The population of Brazil is approximately 200 million.</p>
<p>The population of Russia is approximately 146 million.</p>
<p>The population of Germany is approximately 81 million.</p>
</div>
</solution>
</problem>
""")
sample_numerical_input_problem_xml = textwrap.dedent("""
<problem>
<p>In a numerical input problem, learners enter numbers or a specific and relatively simple mathematical
expression. Learners enter the response in plain text, and the system then converts the text to a symbolic
expression that learners can see below the response field.</p>
<p>The system can handle several types of characters, including basic operators, fractions, exponents, and
common constants such as "i". You can refer learners to "Entering Mathematical and Scientific Expressions"
in the edX Guide for Students for more information.</p>
<p>When you add the problem, be sure to select Settings to specify a Display Name and other values that
apply.</p>
<p>You can use the following example problems as models.</p>
<p>How many miles away from Earth is the sun? Use scientific notation to answer.</p>
<numericalresponse answer="9.3*10^7">
<formulaequationinput/>
</numericalresponse>
<p>The square of what number is -100?</p>
<numericalresponse answer="10*i">
<formulaequationinput/>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>The sun is 93,000,000, or 9.3*10^7, miles away from Earth.</p>
<p>-100 is the square of 10 times the imaginary number, i.</p>
</div>
</solution>
</problem>
""")
sample_text_input_problem_xml = textwrap.dedent("""
<problem>
<p>In text input problems, also known as "fill-in-the-blank" problems, learners enter text into a response
field. The text can include letters and characters such as punctuation marks. The text that the learner
enters must match your specified answer text exactly. You can specify more than one correct answer.
Learners must enter a response that matches one of the correct answers exactly.</p>
<p>When you add the problem, be sure to select Settings to specify a Display Name and other values that
apply.</p>
<p>You can use the following example problem as a model.</p>
<p>What was the first post-secondary school in China to allow both male and female students?</p>
<stringresponse answer="Nanjing Higher Normal Institute" type="ci" >
<additional_answer answer="National Central University"></additional_answer>
<additional_answer answer="Nanjing University"></additional_answer>
<textline size="20"/>
</stringresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>Nanjing Higher Normal Institute first admitted female students in 1920.</p>
</div>
</solution>
</problem>
""")
sample_checkboxes_with_hints_and_feedback_problem_xml = textwrap.dedent("""
<problem>
<p>You can provide feedback for each option in a checkbox problem, with distinct feedback depending on
whether or not the learner selects that option.</p>
<p>You can also provide compound feedback for a specific combination of answers. For example, if you have
three possible answers in the problem, you can configure specific feedback for when a learner selects each
combination of possible answers.</p>
<p>You can also add hints for learners.</p>
<p>Be sure to select Settings to specify a Display Name and other values that apply.</p>
<p>Use the following example problem as a model.</p>
<p>Which of the following is a fruit? Check all that apply.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">apple
<choicehint selected="true">You are correct that an apple is a fruit because it is the fertilized
ovary that comes from an apple tree and contains seeds.</choicehint>
<choicehint selected="false">Remember that an apple is also a fruit.</choicehint></choice>
<choice correct="true">pumpkin
<choicehint selected="true">You are correct that a pumpkin is a fruit because it is the fertilized
ovary of a squash plant and contains seeds.</choicehint>
<choicehint selected="false">Remember that a pumpkin is also a fruit.</choicehint></choice>
<choice correct="false">potato
<choicehint selected="true">A potato is a vegetable, not a fruit, because it does not come from a
flower and does not contain seeds.</choicehint>
<choicehint selected="false">You are correct that a potato is a vegetable because it is an edible
part of a plant in tuber form.</choicehint></choice>
<choice correct="true">tomato
<choicehint selected="true">You are correct that a tomato is a fruit because it is the fertilized
ovary of a tomato plant and contains seeds.</choicehint>
<choicehint selected="false">Many people mistakenly think a tomato is a vegetable. However, because
a tomato is the fertilized ovary of a tomato plant and contains seeds, it is a fruit.</choicehint>
</choice>
<compoundhint value="A B D">An apple, pumpkin, and tomato are all fruits as they all are fertilized
ovaries of a plant and contain seeds.</compoundhint>
<compoundhint value="A B C D">You are correct that an apple, pumpkin, and tomato are all fruits as they
all are fertilized ovaries of a plant and contain seeds. However, a potato is not a fruit as it is an
edible part of a plant in tuber form and is a vegetable.</compoundhint>
</checkboxgroup>
</choiceresponse>
<demandhint>
<hint>A fruit is the fertilized ovary from a flower.</hint>
<hint>A fruit contains seeds of the plant.</hint>
</demandhint>
</problem>
""")
sample_dropdown_with_hints_and_feedback_problem_xml = textwrap.dedent("""
<problem>
<p>You can provide feedback for each available option in a dropdown problem.</p>
<p>You can also add hints for learners.</p>
<p>Be sure to select Settings to specify a Display Name and other values that apply.</p>
<p>Use the following example problem as a model.</p>
<p> A/an ________ is a vegetable.</p>
<optionresponse>
<optioninput>
<option correct="False">apple <optionhint>An apple is the fertilized ovary that comes from an apple
tree and contains seeds, meaning it is a fruit.</optionhint></option>
<option correct="False">pumpkin <optionhint>A pumpkin is the fertilized ovary of a squash plant and
contains seeds, meaning it is a fruit.</optionhint></option>
<option correct="True">potato <optionhint>A potato is an edible part of a plant in tuber form and is a
vegetable.</optionhint></option>
<option correct="False">tomato <optionhint>Many people mistakenly think a tomato is a vegetable.
However, because a tomato is the fertilized ovary of a tomato plant and contains seeds, it is a fruit.
</optionhint></option>
</optioninput>
</optionresponse>
<demandhint>
<hint>A fruit is the fertilized ovary from a flower.</hint>
<hint>A fruit contains seeds of the plant.</hint>
</demandhint>
</problem>
""")
sample_multichoice_with_hints_and_feedback_problem_xml = textwrap.dedent("""
<problem>
<p>You can provide feedback for each option in a multiple choice problem.</p>
<p>You can also add hints for learners.</p>
<p>Be sure to select Settings to specify a Display Name and other values that apply.</p>
<p>Use the following example problem as a model.</p>
<p>Which of the following is a vegetable?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">apple <choicehint>An apple is the fertilized ovary that comes from an apple
tree and contains seeds, meaning it is a fruit.</choicehint></choice>
<choice correct="false">pumpkin <choicehint>A pumpkin is the fertilized ovary of a squash plant and
contains seeds, meaning it is a fruit.</choicehint></choice>
<choice correct="true">potato <choicehint>A potato is an edible part of a plant in tuber form and is a
vegetable.</choicehint></choice>
<choice correct="false">tomato <choicehint>Many people mistakenly think a tomato is a vegetable.
However, because a tomato is the fertilized ovary of a tomato plant and contains seeds, it is a fruit.
</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
<demandhint>
<hint>A fruit is the fertilized ovary from a flower.</hint>
<hint>A fruit contains seeds of the plant.</hint>
</demandhint>
</problem>
""")
sample_numerical_input_with_hints_and_feedback_problem_xml = textwrap.dedent("""
<problem>
<p>You can provide feedback for correct answers in numerical input problems. You cannot provide feedback
for incorrect answers.</p>
<p>Use feedback for the correct answer to reinforce the process for arriving at the numerical value.</p>
<p>You can also add hints for learners.</p>
<p>Be sure to select Settings to specify a Display Name and other values that apply.</p>
<p>Use the following example problem as a model.</p>
<p>What is the arithmetic mean for the following set of numbers? (1, 5, 6, 3, 5)</p>
<numericalresponse answer="4">
<formulaequationinput/>
<correcthint>The mean for this set of numbers is 20 / 5, which equals 4.</correcthint>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>The mean is calculated by summing the set of numbers and dividing by n. In this case:
(1 + 5 + 6 + 3 + 5) / 5 = 20 / 5 = 4.</p>
</div>
</solution>
<demandhint>
<hint>The mean is calculated by summing the set of numbers and dividing by n.</hint>
<hint>n is the count of items in the set.</hint>
</demandhint>
</problem>
""")
sample_text_input_with_hints_and_feedback_problem_xml = textwrap.dedent("""
<problem>
<p>You can provide feedback for the correct answer in text input problems, as well as for specific
incorrect answers.</p>
<p>Use feedback on expected incorrect answers to address common misconceptions and to provide guidance on
how to arrive at the correct answer.</p>
<p>Be sure to select Settings to specify a Display Name and other values that apply.</p>
<p>Use the following example problem as a model.</p>
<p>Which U.S. state has the largest land area?</p>
<stringresponse answer="Alaska" type="ci" >
<correcthint>Alaska is 576,400 square miles, more than double the land area of the second largest state,
Texas.</correcthint>
<stringequalhint answer="Texas">While many people think Texas is the largest state, it is actually the
second largest, with 261,797 square miles.</stringequalhint>
<stringequalhint answer="California">California is the third largest state, with 155,959 square miles.
</stringequalhint>
<textline size="20"/>
</stringresponse>
<demandhint>
<hint>Consider the square miles, not population.</hint>
<hint>Consider all 50 states, not just the continental United States.</hint>
</demandhint>
</problem>
""")
def _create_descriptor(self, xml, name=None):
    """Build a CapaDescriptor, backed by the test system, for ``xml``.

    A truthy ``name`` becomes the descriptor's display name.
    """
    built = CapaDescriptor(get_test_system(), scope_ids=1)
    built.data = xml
    if name:
        built.display_name = name
    return built
@ddt.data(*responsetypes.registry.registered_tags())
def test_all_response_types(self, response_tag):
    """Every registered response tag is reported in problem_types and the index."""
    display_name = "Some Capa Problem"
    problem_xml = "<problem><{0}></{0}></problem>".format(response_tag)
    descriptor = self._create_descriptor(problem_xml, name=display_name)
    expected_index = {
        'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
        'problem_types': [response_tag],
        'content': {
            'display_name': display_name,
            'capa_content': ''
        }
    }
    self.assertEqual(descriptor.problem_types, {response_tag})
    self.assertEqual(descriptor.index_dictionary(), expected_index)
def test_response_types_ignores_non_response_tags(self):
    """problem_types lists only *response tags; other markup is indexed as text."""
    xml = textwrap.dedent("""
        <problem>
        <p>Label</p>
        <div>Some comment</div>
        <multiplechoiceresponse>
        <choicegroup type="MultipleChoice" answer-pool="4">
        <choice correct="false">Apple</choice>
        <choice correct="false">Banana</choice>
        <choice correct="false">Chocolate</choice>
        <choice correct ="true">Donut</choice>
        </choicegroup>
        </multiplechoiceresponse>
        </problem>
    """)
    name = "Test Capa Problem"
    descriptor = self._create_descriptor(xml, name=name)
    self.assertEquals(descriptor.problem_types, {"multiplechoiceresponse"})
    # capa_content is the flattened text of every (non-response-specific) node.
    self.assertEquals(descriptor.index_dictionary(), {
        'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
        'problem_types': ["multiplechoiceresponse"],
        'content': {
            'display_name': name,
            'capa_content': ' Label Some comment Apple Banana Chocolate Donut '
        }
    })
def test_response_types_multiple_tags(self):
    """Repeated and mixed response tags are de-duplicated in problem_types."""
    xml = textwrap.dedent("""
        <problem>
        <p>Label</p>
        <div>Some comment</div>
        <multiplechoiceresponse>
        <choicegroup type="MultipleChoice" answer-pool="1">
        <choice correct ="true">Donut</choice>
        </choicegroup>
        </multiplechoiceresponse>
        <multiplechoiceresponse>
        <choicegroup type="MultipleChoice" answer-pool="1">
        <choice correct ="true">Buggy</choice>
        </choicegroup>
        </multiplechoiceresponse>
        <optionresponse>
        <optioninput options="('1','2')" correct="2"></optioninput>
        </optionresponse>
        </problem>
    """)
    name = "Other Test Capa Problem"
    descriptor = self._create_descriptor(xml, name=name)
    self.assertEquals(descriptor.problem_types, {"multiplechoiceresponse", "optionresponse"})
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': ["optionresponse", "multiplechoiceresponse"],
            'content': {
                'display_name': name,
                'capa_content': ' Label Some comment Donut Buggy '
            }
        }
    )
def test_solutions_not_indexed(self):
    """<solution> bodies must be excluded from the indexed capa_content."""
    xml = textwrap.dedent("""
        <problem>
        <solution>
        <div class="detailed-solution">
        <p>Explanation</p>
        <p>This is what the 1st solution.</p>
        </div>
        </solution>
        <solution>
        <div class="detailed-solution">
        <p>Explanation</p>
        <p>This is the 2nd solution.</p>
        </div>
        </solution>
        </problem>
    """)
    name = "Blank Common Capa Problem"
    descriptor = self._create_descriptor(xml, name=name)
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': [],
            'content': {
                'display_name': name,
                # only whitespace remains once the solutions are stripped
                'capa_content': ' '
            }
        }
    )
def test_indexing_checkboxes(self):
    """Checkbox problems index their visible text and report choiceresponse."""
    name = "Checkboxes"
    descriptor = self._create_descriptor(self.sample_checkbox_problem_xml, name=name)
    # Expected flattened problem text (newlines are normalized to spaces below).
    capa_content = textwrap.dedent("""
        Title
        Description
        Example
        The following languages are in the Indo-European family:
        Urdu
        Finnish
        Marathi
        French
        Hungarian
        Note: Make sure you select all of the correct options—there may be more than one!
    """)
    self.assertEquals(descriptor.problem_types, {"choiceresponse"})
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': ["choiceresponse"],
            'content': {
                'display_name': name,
                'capa_content': capa_content.replace("\n", " ")
            }
        }
    )
def test_indexing_dropdown(self):
    """Dropdown problems index their prompt text and report optionresponse."""
    name = "Dropdown"
    descriptor = self._create_descriptor(self.sample_dropdown_problem_xml, name=name)
    # Expected flattened problem text (newlines normalized to spaces below).
    capa_content = textwrap.dedent("""
        Dropdown problems allow learners to select only one option from a list of options.
        Description
        You can use the following example problem as a model.
        Which of the following countries celebrates its independence on August 15?
    """)
    self.assertEquals(descriptor.problem_types, {"optionresponse"})
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': ["optionresponse"],
            'content': {
                'display_name': name,
                'capa_content': capa_content.replace("\n", " ")
            }
        }
    )
def test_indexing_multiple_choice(self):
    """Multiple-choice problems index prompt and choices as multiplechoiceresponse."""
    name = "Multiple Choice"
    descriptor = self._create_descriptor(self.sample_multichoice_problem_xml, name=name)
    # Expected flattened problem text (newlines normalized to spaces below).
    capa_content = textwrap.dedent("""
        Multiple choice problems allow learners to select only one option.
        When you add the problem, be sure to select Settings to specify a Display Name and other values.
        You can use the following example problem as a model.
        Which of the following countries has the largest population?
        Brazil
        Germany
        Indonesia
        Russia
    """)
    self.assertEquals(descriptor.problem_types, {"multiplechoiceresponse"})
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': ["multiplechoiceresponse"],
            'content': {
                'display_name': name,
                'capa_content': capa_content.replace("\n", " ")
            }
        }
    )
def test_indexing_numerical_input(self):
    """Numerical-input problems index prompt text as numericalresponse."""
    name = "Numerical Input"
    descriptor = self._create_descriptor(self.sample_numerical_input_problem_xml, name=name)
    # Expected flattened problem text (newlines normalized to spaces below).
    capa_content = textwrap.dedent("""
        In a numerical input problem, learners enter numbers or a specific and relatively simple mathematical
        expression. Learners enter the response in plain text, and the system then converts the text to a symbolic
        expression that learners can see below the response field.
        The system can handle several types of characters, including basic operators, fractions, exponents, and
        common constants such as "i". You can refer learners to "Entering Mathematical and Scientific Expressions"
        in the edX Guide for Students for more information.
        When you add the problem, be sure to select Settings to specify a Display Name and other values that
        apply.
        You can use the following example problems as models.
        How many miles away from Earth is the sun? Use scientific notation to answer.
        The square of what number is -100?
    """)
    self.assertEquals(descriptor.problem_types, {"numericalresponse"})
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': ["numericalresponse"],
            'content': {
                'display_name': name,
                'capa_content': capa_content.replace("\n", " ")
            }
        }
    )
def test_indexing_text_input(self):
    """Text-input problems index prompt text as stringresponse."""
    name = "Text Input"
    descriptor = self._create_descriptor(self.sample_text_input_problem_xml, name=name)
    # Expected flattened problem text (newlines normalized to spaces below).
    capa_content = textwrap.dedent("""
        In text input problems, also known as "fill-in-the-blank" problems, learners enter text into a response
        field. The text can include letters and characters such as punctuation marks. The text that the learner
        enters must match your specified answer text exactly. You can specify more than one correct answer.
        Learners must enter a response that matches one of the correct answers exactly.
        When you add the problem, be sure to select Settings to specify a Display Name and other values that
        apply.
        You can use the following example problem as a model.
        What was the first post-secondary school in China to allow both male and female students?
    """)
    self.assertEquals(descriptor.problem_types, {"stringresponse"})
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': ["stringresponse"],
            'content': {
                'display_name': name,
                'capa_content': capa_content.replace("\n", " ")
            }
        }
    )
def test_indexing_checkboxes_with_hints_and_feedback(self):
    """Hint/feedback markup is excluded; only the prompt and choices are indexed."""
    name = "Checkboxes with Hints and Feedback"
    descriptor = self._create_descriptor(self.sample_checkboxes_with_hints_and_feedback_problem_xml, name=name)
    # Expected flattened problem text (newlines normalized to spaces below).
    capa_content = textwrap.dedent("""
        You can provide feedback for each option in a checkbox problem, with distinct feedback depending on
        whether or not the learner selects that option.
        You can also provide compound feedback for a specific combination of answers. For example, if you have
        three possible answers in the problem, you can configure specific feedback for when a learner selects each
        combination of possible answers.
        You can also add hints for learners.
        Be sure to select Settings to specify a Display Name and other values that apply.
        Use the following example problem as a model.
        Which of the following is a fruit? Check all that apply.
        apple
        pumpkin
        potato
        tomato
    """)
    self.assertEquals(descriptor.problem_types, {"choiceresponse"})
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': ["choiceresponse"],
            'content': {
                'display_name': name,
                'capa_content': capa_content.replace("\n", " ")
            }
        }
    )
def test_indexing_dropdown_with_hints_and_feedback(self):
    """Hint/feedback markup is excluded; only the prompt and options are indexed."""
    name = "Dropdown with Hints and Feedback"
    descriptor = self._create_descriptor(self.sample_dropdown_with_hints_and_feedback_problem_xml, name=name)
    # Expected flattened problem text (newlines normalized to spaces below).
    capa_content = textwrap.dedent("""
        You can provide feedback for each available option in a dropdown problem.
        You can also add hints for learners.
        Be sure to select Settings to specify a Display Name and other values that apply.
        Use the following example problem as a model.
        A/an ________ is a vegetable.
        apple
        pumpkin
        potato
        tomato
    """)
    self.assertEquals(descriptor.problem_types, {"optionresponse"})
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': ["optionresponse"],
            'content': {
                'display_name': name,
                'capa_content': capa_content.replace("\n", " ")
            }
        }
    )
def test_indexing_multiple_choice_with_hints_and_feedback(self):
    """Hint/feedback markup is excluded; only the prompt and choices are indexed."""
    name = "Multiple Choice with Hints and Feedback"
    descriptor = self._create_descriptor(self.sample_multichoice_with_hints_and_feedback_problem_xml, name=name)
    # Expected flattened problem text (newlines normalized to spaces below).
    capa_content = textwrap.dedent("""
        You can provide feedback for each option in a multiple choice problem.
        You can also add hints for learners.
        Be sure to select Settings to specify a Display Name and other values that apply.
        Use the following example problem as a model.
        Which of the following is a vegetable?
        apple
        pumpkin
        potato
        tomato
    """)
    self.assertEquals(descriptor.problem_types, {"multiplechoiceresponse"})
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': ["multiplechoiceresponse"],
            'content': {
                'display_name': name,
                'capa_content': capa_content.replace("\n", " ")
            }
        }
    )
def test_indexing_numerical_input_with_hints_and_feedback(self):
    """Hint/feedback markup is excluded; only the prompt text is indexed."""
    name = "Numerical Input with Hints and Feedback"
    descriptor = self._create_descriptor(self.sample_numerical_input_with_hints_and_feedback_problem_xml, name=name)
    # Expected flattened problem text (newlines normalized to spaces below).
    capa_content = textwrap.dedent("""
        You can provide feedback for correct answers in numerical input problems. You cannot provide feedback
        for incorrect answers.
        Use feedback for the correct answer to reinforce the process for arriving at the numerical value.
        You can also add hints for learners.
        Be sure to select Settings to specify a Display Name and other values that apply.
        Use the following example problem as a model.
        What is the arithmetic mean for the following set of numbers? (1, 5, 6, 3, 5)
    """)
    self.assertEquals(descriptor.problem_types, {"numericalresponse"})
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': ["numericalresponse"],
            'content': {
                'display_name': name,
                'capa_content': capa_content.replace("\n", " ")
            }
        }
    )
def test_indexing_text_input_with_hints_and_feedback(self):
    """Hint/feedback markup is excluded; only the prompt text is indexed."""
    name = "Text Input with Hints and Feedback"
    descriptor = self._create_descriptor(self.sample_text_input_with_hints_and_feedback_problem_xml, name=name)
    # Expected flattened problem text (newlines normalized to spaces below).
    capa_content = textwrap.dedent("""
        You can provide feedback for the correct answer in text input problems, as well as for specific
        incorrect answers.
        Use feedback on expected incorrect answers to address common misconceptions and to provide guidance on
        how to arrive at the correct answer.
        Be sure to select Settings to specify a Display Name and other values that apply.
        Use the following example problem as a model.
        Which U.S. state has the largest land area?
    """)
    self.assertEquals(descriptor.problem_types, {"stringresponse"})
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': ["stringresponse"],
            'content': {
                'display_name': name,
                'capa_content': capa_content.replace("\n", " ")
            }
        }
    )
def test_indexing_problem_with_html_tags(self):
    """<style>, <script>, comments, and CDATA are stripped from indexed content."""
    sample_problem_xml = textwrap.dedent("""
        <problem>
        <style>p {left: 10px;}</style>
        <!-- Beginning of the html -->
        <p>This has HTML comment in it.<!-- Commenting Content --></p>
        <!-- Here comes CDATA -->
        <![CDATA[This is just a CDATA!]]>
        <p>HTML end.</p>
        <!-- Script that makes everything alive! -->
        <script>
        var alive;
        </script>
        </problem>
    """)
    name = "Mixed business"
    descriptor = self._create_descriptor(sample_problem_xml, name=name)
    # Only the plain paragraph text should survive indexing.
    capa_content = textwrap.dedent("""
        This has HTML comment in it.
        HTML end.
    """)
    self.assertEquals(
        descriptor.index_dictionary(), {
            'content_type': CapaDescriptor.INDEX_CONTENT_TYPE,
            'problem_types': [],
            'content': {
                'display_name': name,
                'capa_content': capa_content.replace("\n", " ")
            }
        }
    )
def test_invalid_xml_handling(self):
    """
    Tests to confirm that invalid XML does not throw a wake-up-ops level error.
    See TNL-5057 for quick fix, TNL-5245 for full resolution.
    """
    sample_invalid_xml = textwrap.dedent("""
        <problem>
        </proble-oh no my finger broke and I can't close the problem tag properly...
    """)
    descriptor = self._create_descriptor(sample_invalid_xml, name="Invalid XML")
    try:
        # has_support() must degrade gracefully on malformed XML rather than
        # letting the lxml parse error escape to the caller.
        descriptor.has_support(None, "multi_device")
    except etree.XMLSyntaxError:
        self.fail("Exception raised during XML parsing, this method should be resilient to such errors")
class ComplexEncoderTest(unittest.TestCase):
    """Tests for ComplexEncoder's JSON serialization of complex numbers."""

    def test_default(self):
        """
        Check that complex numbers can be encoded into JSON.
        """
        encoded = json.dumps(1 - 1j, cls=ComplexEncoder)
        # json wraps the value in quotes; strip them before comparing.
        self.assertEqual('1-1*j', encoded[1:-1])
class TestProblemCheckTracking(unittest.TestCase):
    """
    Ensure correct tracking information is included in events emitted during problem checks.
    """

    def setUp(self):
        super(TestProblemCheckTracking, self).setUp()
        # Show full diffs for the large event dictionaries compared below.
        self.maxDiff = None

    def test_choice_answer_text(self):
        """The submission event carries question text, answer text, and types."""
        xml = """\
            <problem display_name="Multiple Choice Questions">
            <optionresponse>
            <label>What color is the open ocean on a sunny day?</label>
            <optioninput options="('yellow','blue','green')" correct="blue"/>
            </optionresponse>
            <multiplechoiceresponse>
            <label>Which piece of furniture is built for sitting?</label>
            <choicegroup type="MultipleChoice">
            <choice correct="false"><text>a table</text></choice>
            <choice correct="false"><text>a desk</text></choice>
            <choice correct="true"><text>a chair</text></choice>
            <choice correct="false"><text>a bookshelf</text></choice>
            </choicegroup>
            </multiplechoiceresponse>
            <choiceresponse>
            <label>Which of the following are musical instruments?</label>
            <checkboxgroup>
            <choice correct="true">a piano</choice>
            <choice correct="false">a tree</choice>
            <choice correct="true">a guitar</choice>
            <choice correct="false">a window</choice>
            </checkboxgroup>
            </choiceresponse>
            </problem>
            """

        # Whitespace screws up comparisons
        xml = ''.join(line.strip() for line in xml.split('\n'))
        factory = self.capa_factory_for_problem_xml(xml)
        module = factory.create()

        answer_input_dict = {
            factory.input_key(2): 'blue',
            factory.input_key(3): 'choice_0',
            factory.input_key(4): ['choice_0', 'choice_1'],
        }
        event = self.get_event_for_answers(module, answer_input_dict)

        self.assertEquals(event['submission'], {
            factory.answer_key(2): {
                'question': 'What color is the open ocean on a sunny day?',
                'answer': 'blue',
                'response_type': 'optionresponse',
                'input_type': 'optioninput',
                'correct': True,
                'variant': '',
            },
            factory.answer_key(3): {
                'question': 'Which piece of furniture is built for sitting?',
                # the raw inner markup of the chosen <choice> is reported
                'answer': u'<text>a table</text>',
                'response_type': 'multiplechoiceresponse',
                'input_type': 'choicegroup',
                'correct': False,
                'variant': '',
            },
            factory.answer_key(4): {
                'question': 'Which of the following are musical instruments?',
                'answer': [u'a piano', u'a tree'],
                'response_type': 'choiceresponse',
                'input_type': 'checkboxgroup',
                'correct': False,
                'variant': '',
            },
        })

    def capa_factory_for_problem_xml(self, xml):
        """Return a CapaFactory subclass whose sample problem XML is ``xml``."""
        class CustomCapaFactory(CapaFactory):
            """
            A factory for creating a Capa problem with arbitrary xml.
            """
            sample_problem_xml = textwrap.dedent(xml)

        return CustomCapaFactory

    def get_event_for_answers(self, module, answer_input_dict):
        """Submit the answers and return the emitted problem_check event payload."""
        with patch.object(module.runtime, 'publish') as mock_track_function:
            module.check_problem(answer_input_dict)

            self.assertGreaterEqual(len(mock_track_function.mock_calls), 2)
            # There are potentially 2 track logs: answers and hint. [-1]=answers.
            mock_call = mock_track_function.mock_calls[-1]
            event = mock_call[1][2]

            return event

    def test_numerical_textline(self):
        """A numerical response records the raw text answer and correctness."""
        factory = CapaFactory
        module = factory.create()

        answer_input_dict = {
            factory.input_key(2): '3.14'
        }

        event = self.get_event_for_answers(module, answer_input_dict)
        self.assertEquals(event['submission'], {
            factory.answer_key(2): {
                'question': '',
                'answer': '3.14',
                'response_type': 'numericalresponse',
                'input_type': 'textline',
                'correct': True,
                'variant': '',
            }
        })

    def test_multiple_inputs(self):
        """Each input of a multi-input response is reported separately with its label."""
        group_label = 'Choose the correct color'
        input1_label = 'What color is the sky?'
        input2_label = 'What color are pine needles?'
        factory = self.capa_factory_for_problem_xml("""\
            <problem display_name="Multiple Inputs">
            <optionresponse>
            <label>{}</label>
            <optioninput options="('yellow','blue','green')" correct="blue" label="{}"/>
            <optioninput options="('yellow','blue','green')" correct="green" label="{}"/>
            </optionresponse>
            </problem>
            """.format(group_label, input1_label, input2_label))
        module = factory.create()

        answer_input_dict = {
            factory.input_key(2, 1): 'blue',
            factory.input_key(2, 2): 'yellow',
        }

        event = self.get_event_for_answers(module, answer_input_dict)
        self.assertEquals(event['submission'], {
            factory.answer_key(2, 1): {
                'group_label': group_label,
                'question': input1_label,
                'answer': 'blue',
                'response_type': 'optionresponse',
                'input_type': 'optioninput',
                'correct': True,
                'variant': '',
            },
            factory.answer_key(2, 2): {
                'group_label': group_label,
                'question': input2_label,
                'answer': 'yellow',
                'response_type': 'optionresponse',
                'input_type': 'optioninput',
                'correct': False,
                'variant': '',
            },
        })

    def test_optioninput_extended_xml(self):
        """Test the new XML form of writing with <option> tag instead of options= attribute."""
        group_label = 'Are you the Gatekeeper?'
        input1_label = 'input 1 label'
        input2_label = 'input 2 label'
        factory = self.capa_factory_for_problem_xml("""\
            <problem display_name="Woo Hoo">
            <optionresponse>
            <label>{}</label>
            <optioninput label="{}">
            <option correct="True" label="Good Job">
            apple
            <optionhint>
            banana
            </optionhint>
            </option>
            <option correct="False" label="blorp">
            cucumber
            <optionhint>
            donut
            </optionhint>
            </option>
            </optioninput>
            <optioninput label="{}">
            <option correct="True">
            apple
            <optionhint>
            banana
            </optionhint>
            </option>
            <option correct="False">
            cucumber
            <optionhint>
            donut
            </optionhint>
            </option>
            </optioninput>
            </optionresponse>
            </problem>
            """.format(group_label, input1_label, input2_label))
        module = factory.create()

        answer_input_dict = {
            factory.input_key(2, 1): 'apple',
            factory.input_key(2, 2): 'cucumber',
        }

        event = self.get_event_for_answers(module, answer_input_dict)
        self.assertEquals(event['submission'], {
            factory.answer_key(2, 1): {
                'group_label': group_label,
                'question': input1_label,
                'answer': 'apple',
                'response_type': 'optionresponse',
                'input_type': 'optioninput',
                'correct': True,
                'variant': '',
            },
            factory.answer_key(2, 2): {
                'group_label': group_label,
                'question': input2_label,
                'answer': 'cucumber',
                'response_type': 'optionresponse',
                'input_type': 'optioninput',
                'correct': False,
                'variant': '',
            },
        })

    def test_rerandomized_inputs(self):
        """With per-attempt randomization, the event's variant is the module seed."""
        factory = CapaFactory
        module = factory.create(rerandomize=RANDOMIZATION.ALWAYS)

        answer_input_dict = {
            factory.input_key(2): '3.14'
        }

        event = self.get_event_for_answers(module, answer_input_dict)
        self.assertEquals(event['submission'], {
            factory.answer_key(2): {
                'question': '',
                'answer': '3.14',
                'response_type': 'numericalresponse',
                'input_type': 'textline',
                'correct': True,
                'variant': module.seed,
            }
        })

    def test_file_inputs(self):
        """File-submission answers are reported as the submitted file paths."""
        fnames = ["prog1.py", "prog2.py", "prog3.py"]
        fpaths = [os.path.join(DATA_DIR, "capa", fname) for fname in fnames]
        fileobjs = [open(fpath) for fpath in fpaths]
        for fileobj in fileobjs:
            self.addCleanup(fileobj.close)

        factory = CapaFactoryWithFiles
        module = factory.create()

        # Mock the XQueueInterface.
        xqueue_interface = XQueueInterface("http://example.com/xqueue", Mock())
        xqueue_interface._http_post = Mock(return_value=(0, "ok"))  # pylint: disable=protected-access
        module.system.xqueue['interface'] = xqueue_interface

        answer_input_dict = {
            CapaFactoryWithFiles.input_key(response_num=2): fileobjs,
            CapaFactoryWithFiles.input_key(response_num=3): 'None',
        }

        event = self.get_event_for_answers(module, answer_input_dict)
        self.assertEquals(event['submission'], {
            factory.answer_key(2): {
                'question': '',
                'answer': fpaths,
                'response_type': 'coderesponse',
                'input_type': 'filesubmission',
                'correct': False,
                'variant': '',
            },
            factory.answer_key(3): {
                'answer': 'None',
                'correct': True,
                'question': '',
                'response_type': 'customresponse',
                'input_type': 'textline',
                'variant': ''
            }
        })

    def test_get_answer_with_jump_to_id_urls(self):
        """
        Make sure replace_jump_to_id_urls() is called in get_answer.
        """
        problem_xml = textwrap.dedent("""
            <problem>
            <p>What is 1+4?</p>
            <numericalresponse answer="5">
            <formulaequationinput />
            </numericalresponse>
            <solution>
            <div class="detailed-solution">
            <p>Explanation</p>
            <a href="/jump_to_id/c0f8d54964bc44a4a1deb8ecce561ecd">here's the same link to the hint page.</a>
            </div>
            </solution>
            </problem>
        """)

        data = dict()
        problem = CapaFactory.create(showanswer='always', xml=problem_xml)
        # Stub the runtime hook so we can observe whether get_answer uses it.
        problem.runtime.replace_jump_to_id_urls = Mock()
        problem.get_answer(data)

        self.assertTrue(problem.runtime.replace_jump_to_id_urls.called)
|
Jortolsa/l10n-spain | refs/heads/8.0 | l10n_es_aeat_mod340/__init__.py | 14 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Ting (http://www.ting.es)
# Copyright (c) 2011-2013 Acysos S.L. (http://acysos.com)
# Ignacio Ibeas Izquierdo <ignacio@acysos.com>
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import report
from . import wizard
from . import models
|
466152112/scikit-learn | refs/heads/master | sklearn/neighbors/tests/test_kd_tree.py | 129 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
# Random 3x3 symmetric positive semi-definite matrix.
# NOTE(review): V is not referenced by any test visible in this module —
# possibly vestigial (e.g. left over from a mahalanobis-metric test); confirm.
V = np.random.random((3, 3))
V = np.dot(V, V.T)

# Dimensionality of the random point sets used by the query tests.
DIMENSION = 3

# Metric name -> extra keyword arguments passed to KDTree / DistanceMetric.
METRICS = {'euclidean': {},
           'manhattan': {},
           'chebyshev': {},
           'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k-NN via a dense pairwise distance matrix.

    Returns ``(dist, ind)``: for each row of Y, the distances to and indices
    of its k nearest points in X under ``metric``.
    """
    pairwise_dist = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    ind = np.argsort(pairwise_dist, axis=1)[:, :k]
    row_selector = np.arange(Y.shape[0])[:, None]
    return pairwise_dist[row_selector, ind], ind
def test_kd_tree_query():
    """KDTree.query must agree with brute force for every metric/k/strategy."""
    np.random.seed(0)
    X = np.random.random((40, DIMENSION))
    Y = np.random.random((10, DIMENSION))

    def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
        kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
        dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
                                breadth_first=breadth_first)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)

        # don't check indices here: if there are any duplicate distances,
        # the indices may not match. Distances should not have this problem.
        assert_array_almost_equal(dist1, dist2)

    # nose-style generator test: yields one case per parameter combination
    for (metric, kwargs) in METRICS.items():
        for k in (1, 3, 5):
            for dualtree in (True, False):
                for breadth_first in (True, False):
                    yield (check_neighbors,
                           dualtree, breadth_first,
                           k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
    """query_radius must return exactly the points within r of the origin."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)

    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))

    for r in np.linspace(rad[0], rad[-1], 100):
        found = np.sort(kdt.query_radius(query_pt, r + eps)[0])
        expected = np.sort(np.where(rad <= r + eps)[0])
        assert_array_almost_equal(expected, found)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
    """Distances returned by query_radius must match recomputed distances."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)

    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))

    for r in np.linspace(rad[0], rad[-1], 100):
        ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
        recomputed = np.sqrt(((query_pt - X[ind[0]]) ** 2).sum(1))
        assert_array_almost_equal(recomputed, dist[0])
def compute_kernel_slow(Y, X, kernel, h):
    """Dense reference implementation of kernel density estimates.

    For each row of Y, sums the kernel weights over every point in X at
    bandwidth h, scaled by the kernel's normalization constant.
    """
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)

    if kernel == 'gaussian':
        weights = np.exp(-0.5 * (d * d) / (h * h))
    elif kernel == 'tophat':
        weights = (d < h)
    elif kernel == 'epanechnikov':
        weights = (1.0 - (d * d) / (h * h)) * (d < h)
    elif kernel == 'exponential':
        weights = np.exp(-d / h)
    elif kernel == 'linear':
        weights = (1 - d / h) * (d < h)
    elif kernel == 'cosine':
        weights = np.cos(0.5 * np.pi * d / h) * (d < h)
    else:
        raise ValueError('kernel not recognized')
    return norm * weights.sum(-1)
def test_kd_tree_kde(n_samples=100, n_features=3):
    """kernel_density must match the dense reference within atol/rtol."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    kdt = KDTree(X, leaf_size=10)

    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for h in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, h)

            # check_results closes over the current dens_true; the yielded
            # case is expected to run before the generator advances.
            def check_results(kernel, h, atol, rtol, breadth_first):
                dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
                                          kernel=kernel,
                                          breadth_first=breadth_first)
                assert_allclose(dens, dens_true, atol=atol,
                                rtol=max(rtol, 1e-7))

            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, h, atol, rtol,
                               breadth_first)
def test_gaussian_kde(n_samples=1000):
    # Compare gaussian KDE results to scipy.stats.gaussian_kde
    from scipy.stats import gaussian_kde
    np.random.seed(0)

    x_in = np.random.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)

    for h in [0.01, 0.1, 1]:
        kdt = KDTree(x_in[:, None])
        try:
            gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
        except TypeError:
            # older scipy versions have no explicit-bandwidth support
            raise SkipTest("Old scipy, does not accept explicit bandwidth.")

        # normalize by the sample count to compare against scipy's density
        dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
        dens_gkde = gkde.evaluate(x_out)

        assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
    """two_point_correlation must match a brute-force pair count at each radius."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    r = np.linspace(0, 1, 10)
    kdt = KDTree(X, leaf_size=10)

    # brute-force count of (x, y) pairs with distance <= r_i
    D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    counts_true = [(D <= ri).sum() for ri in r]

    def check_two_point(r, dualtree):
        counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
        assert_array_almost_equal(counts, counts_true)

    for dualtree in (True, False):
        yield check_two_point, r, dualtree
def test_kd_tree_pickle():
    """A pickled-and-restored tree must answer queries identically."""
    import pickle
    np.random.seed(0)
    X = np.random.random((10, 3))
    kdt1 = KDTree(X, leaf_size=1)
    ind1, dist1 = kdt1.query(X)

    def check_pickle_protocol(protocol):
        # round-trip the tree through pickle at the given protocol
        kdt2 = pickle.loads(pickle.dumps(kdt1, protocol=protocol))
        ind2, dist2 = kdt2.query(X)
        assert_array_almost_equal(ind1, ind2)
        assert_array_almost_equal(dist1, dist2)

    for protocol in (0, 1, 2):
        yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
    """NeighborsHeap must retain the n_nbrs smallest distances per row."""
    heap = NeighborsHeap(n_pts, n_nbrs)

    for row in range(n_pts):
        # push twice as many candidates as the heap keeps
        d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
        i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
        for d, i in zip(d_in, i_in):
            heap.push(row, d, i)

        ind = np.argsort(d_in)
        d_in = d_in[ind]
        i_in = i_in[ind]

        d_heap, i_heap = heap.get_arrays(sort=True)

        assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
        assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
    """nodeheap_sort must agree with numpy argsort on random values."""
    vals = np.random.random(n_nodes).astype(DTYPE)

    expected_order = np.argsort(vals)
    sorted_vals, order = nodeheap_sort(vals)

    assert_array_almost_equal(expected_order, order)
    assert_array_almost_equal(vals[expected_order], sorted_vals)
def test_simultaneous_sort(n_rows=10, n_pts=201):
    """simultaneous_sort must sort each dist row and permute ind to match."""
    dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)

    dist2 = dist.copy()
    ind2 = ind.copy()

    # simultaneous sort rows using function (sorts in place)
    simultaneous_sort(dist, ind)

    # simultaneous sort rows using numpy as the reference
    i = np.argsort(dist2, axis=1)
    row_ind = np.arange(n_rows)[:, None]
    dist2 = dist2[row_ind, i]
    ind2 = ind2[row_ind, i]

    assert_array_almost_equal(dist, dist2)
    assert_array_almost_equal(ind, ind2)
|
toomanyjoes/mrperfcs386m | refs/heads/master | evalResults/baseFiles_numHosts/conv.py | 5 | #!/usr/bin/python
"""Hadoop Simulator
This simulator takes three configuration files, topology.xml, metadata.xml,
and job.xml, describing a Hadoop job and the topology it will run on.
Two tcl files, topology.tcl and events.tcl, will be generated as input
for ns-2 for further simulation.
"""
#import xml.dom
import xml.dom.minidom
import sys
from optparse import OptionParser
import random
from gen import *
#import getopt
def convert(topo_xml):
root = xml.dom.minidom.parse(topo_xml)
#topo = root.getElementsByTagName(u"topo")[0]
topo = xml_children(root, u'topo')[0]
racks = {}
#lines = ["proc create-topology { } {\n", "\tglobal ns opt\n"]
for rack_group in topo.getElementsByTagName(u"rack_group"):
numrack = len(rack_group.getElementsByTagName(u"rack_index"))
namenode = rack_group.getElementsByTagName(u'name')[0]
name = str(namenode.childNodes[0].nodeValue)
node_group = rack_group.getElementsByTagName(u'compute_node_group')[0]
numnode = len(node_group.getElementsByTagName(u'node_index'))
numswitch = len(rack_group.getElementsByTagName(u'switch_index'))
#lines.append("\tglobal %s\n" % (" ".join([name+'_'+str(i) for i in range(numrack)])))
lines.append("\tfor {set i 0} {$i < %d} {incr i} {\n" % (numrack))
lines.append("\t\tcreate-nodes %s_$i %d\n" % (name, numnode))
lines.append("\t}\n")
if name in racks.keys():
print "error: rack_group name \"%s\" conflict\n" % (name)
connect = [[0]*numswitch for i in range(numrack)]
racks[name] = [name, numrack, numnode, numswitch, connect]
for router in topo.getElementsByTagName(u'router'):
router_name = str(router.getElementsByTagName(u'name')[0].childNodes[0].nodeValue)
lines.append("\tset %s [$ns node]\n" % (router_name))
for group in router.getElementsByTagName(u'connect_to_group'):
switch_index = int(group.getElementsByTagName(u'switch_index')[0].childNodes[0].nodeValue)
rgname = str(group.getElementsByTagName(u'rack_group_name')[0].childNodes[0].nodeValue)
if rgname not in racks.keys():
print "error: rack group name %s not defined\n" % (rgname)
p = racks[rgname]
numrack = p[1]
numnode = p[2]
numswitch = p[3]
connect = p[4]
for i in range(numrack):
rack = connect[i]
if rack[switch_index] <> 0:
print "error: a switch (rack %s[%d] switch %d = %s) connected to multiple routers\n" % (rgname, i, switch_index, repr(rack[switch_index]))
rack[switch_index] = 1 #to indicate it's already written to tcl
#LAN with router
lines.append("\tfor {set i 0} {$i < %s} {incr i} {\n" % (numrack))
lines.append("\t\tcreate-lan $%s %s_$i %d\n" % (router_name, rgname, numnode))
lines.append("\t}\n")
for connect_to in router.getElementsByTagName(u'connect_to'):
print "hello"
lines.append('}\n')
f = open("hadoop.topo.tcl", "w")
f.writelines(lines)
f.close()
#print lines
def main():
    """Parse command-line options, load the XMLs and emit the tcl outputs."""
    usage = "usage: %prog options"
    parser = OptionParser(usage)
    parser.add_option("-v", "--verbose", default=False,
                      action="store_true", dest="verbose")
    parser.add_option("-t", "--topology", dest="topo_xml",
                      help="topology configuration xml")
    parser.add_option("-m", "--metadata", dest="meta_xml",
                      help="metadata configuration xml")
    parser.add_option("-j", "--job", dest="job_xml",
                      help="job configuration xml")
    parser.add_option("-T", "--topoout", dest="topo_tcl",
                      help="output tcl file describing topology",
                      default="hadoop.topo.tcl")
    parser.add_option("-J", "--jobout", dest="job_tcl",
                      help="output tcl file describing job",
                      default="hadoop.job.tcl")
    parser.add_option("-N", "--netsize", dest="netsize",
                      help="number of routers in the topology")
    (options, args) = parser.parse_args()
    # All three input XMLs are mandatory; bail out with usage otherwise.
    if None in (options.topo_xml, options.meta_xml, options.job_xml):
        print 'xmls not defined'
        parser.print_help()
        sys.exit()
    topo = topology_t(options.topo_xml)
    job = job_t(options.job_xml)
    topo.totcl(options.topo_tcl, options.netsize)
    topo.totcl2('mapnodes.tcl')
    # NOTE(review): job tcl output is currently disabled (dead string below).
    '''
    f = open(options.job_tcl, 'w')
    f.write(job.tcl)
    f.close()
    '''


if __name__ == "__main__":
    main()
|
JanNash/sms-tools | refs/heads/master | lectures/05-Sinusoidal-model/plots-code/spectral-sine-synthesis.py | 24 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
from scipy.fftpack import fft, ifft, fftshift
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
# Synthesize three sinusoids directly in the spectral domain and plot the
# magnitude spectrum, phase spectrum and the resulting time-domain signal.
# Python 2 script: "Ns/2" below relies on integer division.
Ns = 256                       # FFT synthesis size
hNs = Ns/2                     # half size (integer under Python 2)
yw = np.zeros(Ns)
fs = 44100                     # sample rate in Hz
freqs = np.array([1000.0, 4000.0, 8000.0])
amps = np.array([.6, .4, .6])
phases = ([0.5, 1.2, 2.3])
# NOTE(review): yploc is computed but never used (genSpecSines takes freqs).
yploc = Ns*freqs/fs
ypmag = 20*np.log10(amps/2.0)  # peak magnitudes in dB
ypphase = phases
# Generate the spectrum of the sines, then invert it with a
# Blackman-Harris synthesis window scaling.
Y = UF.genSpecSines(freqs, ypmag, ypphase, Ns, fs)
mY = 20*np.log10(abs(Y[:hNs]))
pY = np.unwrap(np.angle(Y[:hNs]))
y= fftshift(ifft(Y))*sum(blackmanharris(Ns))
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(fs*np.arange(Ns/2)/Ns, mY, 'r', lw=1.5)
plt.axis([0, fs/2.0,-100,0])
plt.title("mY, freqs (Hz) = 1000, 4000, 8000; amps = .6, .4, .6")
plt.subplot(3,1,2)
pY[pY==0]= np.nan              # hide zero-phase bins in the plot
plt.plot(fs*np.arange(Ns/2)/Ns, pY, 'c', lw=1.5)
plt.axis([0, fs/2.0,-.01,3.0])
plt.title("pY, phases (radians) = .5, 1.2, 2.3")
plt.subplot(3,1,3)
plt.plot(np.arange(-hNs, hNs), y, 'b', lw=1.5)
plt.axis([-hNs, hNs,min(y),max(y)])
plt.title("y")
plt.tight_layout()
plt.savefig('spectral-sine-synthesis.png')
plt.show()
|
plotly/python-api | refs/heads/master | packages/python/plotly/plotly/validators/scattercarpet/marker/gradient/_color.py | 1 | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``scattercarpet.marker.gradient.color`` property."""

    def __init__(
        self, plotly_name="color", parent_name="scattercarpet.marker.gradient", **kwargs
    ):
        # Defaults below may be overridden by the caller through kwargs.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
renecannao/proxysql | refs/heads/master | deps/libinjection/sqlparse2c.py | 8 | #!/usr/bin/env python
#
# Copyright 2012, 2013 Nick Galbreath
# nickg@client9.com
# BSD License -- see COPYING.txt for details
#
"""
Converts a libinjection JSON data file to a C header (.h) file
"""
import sys
def toc(obj):
    """ main routine """
    # Emit the fixed C preamble: include guard, the keyword_t type and
    # forward declarations for every character-class parser referenced
    # from the dispatch table generated below.
    print """
#ifndef _LIBINJECTION_SQLI_DATA_H
#define _LIBINJECTION_SQLI_DATA_H

#include "libinjection.h"
#include "libinjection_sqli.h"

typedef struct {
    const char *word;
    char type;
} keyword_t;

static size_t parse_money(sfilter * sf);
static size_t parse_other(sfilter * sf);
static size_t parse_white(sfilter * sf);
static size_t parse_operator1(sfilter *sf);
static size_t parse_char(sfilter *sf);
static size_t parse_hash(sfilter *sf);
static size_t parse_dash(sfilter *sf);
static size_t parse_slash(sfilter *sf);
static size_t parse_backslash(sfilter * sf);
static size_t parse_operator2(sfilter *sf);
static size_t parse_string(sfilter *sf);
static size_t parse_word(sfilter * sf);
static size_t parse_var(sfilter * sf);
static size_t parse_number(sfilter * sf);
static size_t parse_tick(sfilter * sf);
static size_t parse_ustring(sfilter * sf);
static size_t parse_qstring(sfilter * sf);
static size_t parse_nqstring(sfilter * sf);
static size_t parse_xstring(sfilter * sf);
static size_t parse_bstring(sfilter * sf);
static size_t parse_estring(sfilter * sf);
static size_t parse_bword(sfilter * sf);
"""

    #
    # Mapping of character to function
    #
    fnmap = {
        'CHAR_WORD' : 'parse_word',
        'CHAR_WHITE': 'parse_white',
        'CHAR_OP1'  : 'parse_operator1',
        'CHAR_UNARY': 'parse_operator1',
        'CHAR_OP2'  : 'parse_operator2',
        'CHAR_BANG' : 'parse_operator2',
        'CHAR_BACK' : 'parse_backslash',
        'CHAR_DASH' : 'parse_dash',
        'CHAR_STR'  : 'parse_string',
        'CHAR_HASH' : 'parse_hash',
        'CHAR_NUM'  : 'parse_number',
        'CHAR_SLASH': 'parse_slash',
        'CHAR_SEMICOLON' : 'parse_char',
        'CHAR_COMMA': 'parse_char',
        'CHAR_LEFTPARENS': 'parse_char',
        'CHAR_RIGHTPARENS': 'parse_char',
        'CHAR_LEFTBRACE': 'parse_char',
        'CHAR_RIGHTBRACE': 'parse_char',
        'CHAR_VAR'  : 'parse_var',
        'CHAR_OTHER': 'parse_other',
        'CHAR_MONEY': 'parse_money',
        'CHAR_TICK' : 'parse_tick',
        'CHAR_UNDERSCORE': 'parse_underscore',
        'CHAR_USTRING'   : 'parse_ustring',
        'CHAR_QSTRING'   : 'parse_qstring',
        'CHAR_NQSTRING'  : 'parse_nqstring',
        'CHAR_XSTRING'   : 'parse_xstring',
        'CHAR_BSTRING'   : 'parse_bstring',
        'CHAR_ESTRING'   : 'parse_estring',
        'CHAR_BWORD'     : 'parse_bword'
    }
    # One function pointer per byte value, in charmap order.
    print
    print "typedef size_t (*pt2Function)(sfilter *sf);"
    print "static const pt2Function char_parse_map[] = {"
    pos = 0
    for character in obj['charmap']:
        print "   &%s, /* %d */" % (fnmap[character], pos)
        pos += 1
    print "};"
    print

    # keywords
    # load them
    keywords = obj['keywords']

    # Fingerprints are folded into the keyword table as type 'F',
    # prefixed with '0' to distinguish them from real keywords.
    for fingerprint in list(obj[u'fingerprints']):
        fingerprint = '0' + fingerprint.upper()
        keywords[fingerprint] = 'F'

    # Normalize every key to upper case; collect first since a dict
    # cannot be mutated while iterating it (Python 2 iterkeys).
    needhelp = []
    for key in keywords.iterkeys():
        if key != key.upper():
            needhelp.append(key)

    for key in needhelp:
        tmpv = keywords[key]
        del keywords[key]
        keywords[key.upper()] = tmpv

    print "static const keyword_t sql_keywords[] = {"
    for k in sorted(keywords.keys()):
        # NOTE(review): condition rejects length >= 32 but the message says
        # "greater than 32" -- the C buffer limit is presumably 32 including
        # the NUL terminator; confirm before changing.
        if len(k) > 31:
            sys.stderr.write("ERROR: keyword greater than 32 chars\n")
            sys.exit(1)

        print "    {\"%s\", '%s'}," % (k, keywords[k])
    print "};"
    print "static const size_t sql_keywords_sz = %d;" % (len(keywords), )

    print "#endif"
    return 0
if __name__ == '__main__':
import json
sys.exit(toc(json.load(sys.stdin)))
|
hubsaysnuaa/odoo | refs/heads/8.0 | addons/web_diagram/controllers/__init__.py | 1214 | from . import main
|
chubbymaggie/barf-project | refs/heads/master | examples/scripts/x86/recover_cfg.py | 1 | #! /usr/bin/env python
import os
import sys
from barf.barf import BARF
# Python 2 example script: open a sample binary with BARF and recover its
# control-flow graph over a hard-coded address range.
if __name__ == "__main__":
    #
    # Open file
    #
    try:
        # Path and addresses below are specific to the bundled
        # "branch4" x86 sample binary.
        filename = os.path.abspath("../../bin/x86/branch4")
        barf = BARF(filename)
    except Exception as err:
        print err
        print "[-] Error opening file : %s" % filename
        sys.exit(1)
    #
    # Recover CFG
    #
    print("[+] Recovering program CFG...")
    cfg = barf.recover_cfg(ea_start=0x40052d, ea_end=0x400560)
    # Writes <binary>_cfg output, including the IR for each instruction.
    cfg.save(filename + "_cfg", print_ir=True)
|
valtech-mooc/edx-platform | refs/heads/master | lms/djangoapps/instructor/offline_gradecalc.py | 26 | """
======== Offline calculation of grades =============================================================
Computing grades of a large number of students can take a long time. These routines allow grades to
be computed offline, by a batch process (eg cronjob).
The grades are stored in the OfflineComputedGrade table of the courseware model.
"""
import json
import time
from json import JSONEncoder
from courseware import grades, models
from courseware.courses import get_course_by_id
from django.contrib.auth.models import User
from instructor.utils import DummyRequest
class MyEncoder(JSONEncoder):
    """JSONEncoder that serializes namedtuples as JSON objects.

    NOTE(review): this overrides the *private* ``_iterencode`` hook, which
    only exists in old (Python 2.6-era) json/simplejson implementations --
    presumably the platform pins one; verify before upgrading Python.
    """

    def _iterencode(self, obj, markers=None):
        # Namedtuples (tuples exposing _asdict) are encoded via their dict
        # form; everything else falls back to the stock encoder.
        if isinstance(obj, tuple) and hasattr(obj, '_asdict'):
            gen = self._iterencode_dict(obj._asdict(), markers)
        else:
            gen = JSONEncoder._iterencode(self, obj, markers)
        for chunk in gen:
            yield chunk
def offline_grade_calculation(course_key):
    '''
    Compute grades for all students for a specified course, and save results to the DB.
    '''

    tstart = time.time()
    enrolled_students = User.objects.filter(
        courseenrollment__course_id=course_key,
        courseenrollment__is_active=1
    ).prefetch_related("groups").order_by('username')

    enc = MyEncoder()

    print "{} enrolled students".format(len(enrolled_students))
    course = get_course_by_id(course_key)

    for student in enrolled_students:
        # grades.grade() requires a request object; fake one per student.
        request = DummyRequest()
        request.user = student
        request.session = {}

        gradeset = grades.grade(student, request, course, keep_raw_scores=True)
        # Serialize with MyEncoder so namedtuples in the gradeset survive.
        gs = enc.encode(gradeset)
        ocg, _created = models.OfflineComputedGrade.objects.get_or_create(user=student, course_id=course_key)
        ocg.gradeset = gs
        ocg.save()
        print "%s done" % student  # print statement used because this is run by a management command

    tend = time.time()
    dt = tend - tstart

    # Record a log entry so offline_grades_available() can report freshness.
    ocgl = models.OfflineComputedGradeLog(course_id=course_key, seconds=dt, nstudents=len(enrolled_students))
    ocgl.save()
    print ocgl
    print "All Done!"
def offline_grades_available(course_key):
    """
    Return False when no offline grades exist for the given course;
    otherwise return the latest log entry about the pre-computed grades.
    """
    log_entries = models.OfflineComputedGradeLog.objects.filter(
        course_id=course_key
    )
    return log_entries.latest('created') if log_entries else False
def student_grades(student, request, course, keep_raw_scores=False, use_offline=False):
    '''
    This is the main interface to get grades. It has the same parameters as grades.grade, as well
    as use_offline. If use_offline is True then this will look for an offline computed gradeset in the DB.
    '''
    # Live path: compute the gradeset on the fly.
    if not use_offline:
        return grades.grade(student, request, course, keep_raw_scores=keep_raw_scores)

    try:
        ocg = models.OfflineComputedGrade.objects.get(user=student, course_id=course.id)
    except models.OfflineComputedGrade.DoesNotExist:
        # Mirror the shape of a real gradeset so callers can render the msg.
        return dict(
            raw_scores=[],
            section_breakdown=[],
            msg='Error: no offline gradeset available for {}, {}'.format(student, course.id)
        )

    return json.loads(ocg.gradeset)
|
arkenthera/mpc-hc | refs/heads/develop | src/mpc-hc/mpcresources/sync.py | 8 | # (C) 2015 see Authors.txt
#
# This file is part of MPC-HC.
#
# MPC-HC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# MPC-HC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import fnmatch
import traceback
from multiprocessing import Pool
from UpdatePOT import *
from UpdatePO import *
from UpdateRC import *
def processRC(file):
    """Regenerate the .po and then the .rc for one resource file.

    Never raises: failures are captured as tracebacks in the returned log
    so the worker pool keeps processing the other files.
    Returns a (success, log_text) tuple.
    """
    ret = file + '\n'
    result = True
    steps = (('--> Updating PO file\n', lambda: UpdatePO(file)),
             ('--> Updating RC file\n', lambda: UpdateRC(file, False)))
    for banner, action in steps:
        ret += banner
        try:
            action()
        except Exception:
            # Append the full traceback and stop: RC depends on PO.
            ret += ''.join(traceback.format_exception(*sys.exc_info()))
            result = False
            break
    ret += '----------------------'
    return result, ret
# Python 2 entry point: refresh the POT once, then update every *.rc in
# parallel through a worker pool.
if __name__ == '__main__':
    print 'Updating POT file'
    UpdatePOT()
    print '----------------------'

    pool = Pool()
    results = []
    for file in os.listdir('.'):
        if fnmatch.fnmatch(file, '*.rc'):
            # Each worker gets the basename without extension.
            results.append(pool.apply_async(processRC, [os.path.splitext(file)[0]]));
    pool.close()

    for result in results:
        ret = result.get(True)
        print ret[1]
        # On failure, pause so the console output can be inspected (Windows).
        if (not ret[0]):
            os.system('pause')
|
zdary/intellij-community | refs/heads/master | python/testData/completion/className/simple/simple.py | 166 | Shaz<caret> |
jarshwah/django | refs/heads/master | django/contrib/auth/views.py | 9 | import functools
import warnings
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango21Warning,
)
from django.utils.encoding import force_text
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.six.moves.urllib.parse import urlparse, urlunparse
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
def deprecate_current_app(func):
    """
    Handle deprecation of the current_app parameter of the views.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        # Fast path: nothing deprecated was passed.
        if 'current_app' not in kwargs:
            return func(*args, **kwargs)
        warnings.warn(
            "Passing `current_app` as a keyword argument is deprecated. "
            "Instead the caller of `{0}` should set "
            "`request.current_app`.".format(func.__name__),
            RemovedInDjango20Warning
        )
        # Move the value onto the request, as the new API expects.
        current_app = kwargs.pop('current_app')
        request = kwargs.get('request', None)
        if request and current_app is not None:
            request.current_app = current_app
        return func(*args, **kwargs)
    return wrapped
class SuccessURLAllowedHostsMixin(object):
    """Provides the set of hosts that success-redirect targets may point at."""
    success_url_allowed_hosts = set()

    def get_success_url_allowed_hosts(self):
        # The current request's own host is always trusted, in addition to
        # whatever hosts the view explicitly allows.
        return {self.request.get_host()} | set(self.success_url_allowed_hosts)
class LoginView(SuccessURLAllowedHostsMixin, FormView):
    """
    Displays the login form and handles the login action.
    """
    form_class = AuthenticationForm
    # Optional override for the form class (takes precedence over form_class).
    authentication_form = None
    redirect_field_name = REDIRECT_FIELD_NAME
    template_name = 'registration/login.html'
    redirect_authenticated_user = False
    extra_context = None

    @method_decorator(sensitive_post_parameters())
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        # Optionally short-circuit for users who are already logged in.
        if self.redirect_authenticated_user and self.request.user.is_authenticated:
            redirect_to = self.get_success_url()
            if redirect_to == self.request.path:
                # Redirecting to ourselves would loop forever.
                raise ValueError(
                    "Redirection loop for authenticated user detected. Check that "
                    "your LOGIN_REDIRECT_URL doesn't point to a login page."
                )
            return HttpResponseRedirect(redirect_to)
        return super(LoginView, self).dispatch(request, *args, **kwargs)

    def get_success_url(self):
        """Ensure the user-originating redirection URL is safe."""
        redirect_to = self.request.POST.get(
            self.redirect_field_name,
            self.request.GET.get(self.redirect_field_name, '')
        )
        url_is_safe = is_safe_url(
            url=redirect_to,
            allowed_hosts=self.get_success_url_allowed_hosts(),
            require_https=self.request.is_secure(),
        )
        # Fall back to the configured default rather than an unsafe target.
        if not url_is_safe:
            return resolve_url(settings.LOGIN_REDIRECT_URL)
        return redirect_to

    def get_form_class(self):
        return self.authentication_form or self.form_class

    def form_valid(self, form):
        """Security check complete. Log the user in."""
        auth_login(self.request, form.get_user())
        return HttpResponseRedirect(self.get_success_url())

    def get_context_data(self, **kwargs):
        context = super(LoginView, self).get_context_data(**kwargs)
        current_site = get_current_site(self.request)
        context.update({
            self.redirect_field_name: self.get_success_url(),
            'site': current_site,
            'site_name': current_site.name,
        })
        if self.extra_context is not None:
            context.update(self.extra_context)
        return context
@deprecate_current_app
def login(request, *args, **kwargs):
    # Deprecated function-based wrapper; delegates to LoginView.
    warnings.warn(
        'The login() view is superseded by the class-based LoginView().',
        RemovedInDjango21Warning, stacklevel=2
    )
    return LoginView.as_view(**kwargs)(request, *args, **kwargs)
class LogoutView(SuccessURLAllowedHostsMixin, TemplateView):
    """
    Logs out the user and displays 'You are logged out' message.
    """
    next_page = None
    redirect_field_name = REDIRECT_FIELD_NAME
    template_name = 'registration/logged_out.html'
    extra_context = None

    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        auth_logout(request)
        next_page = self.get_next_page()
        if next_page:
            # Redirect to this page until the session has been cleared.
            return HttpResponseRedirect(next_page)
        return super(LogoutView, self).dispatch(request, *args, **kwargs)

    def get_next_page(self):
        # Precedence: explicit next_page attribute, then the
        # LOGOUT_REDIRECT_URL setting; a request parameter (below) wins
        # over both if it passes the safety check.
        if self.next_page is not None:
            next_page = resolve_url(self.next_page)
        elif settings.LOGOUT_REDIRECT_URL:
            next_page = resolve_url(settings.LOGOUT_REDIRECT_URL)
        else:
            next_page = self.next_page

        if (self.redirect_field_name in self.request.POST or
                self.redirect_field_name in self.request.GET):
            next_page = self.request.POST.get(
                self.redirect_field_name,
                self.request.GET.get(self.redirect_field_name)
            )
            url_is_safe = is_safe_url(
                url=next_page,
                allowed_hosts=self.get_success_url_allowed_hosts(),
                require_https=self.request.is_secure(),
            )
            # Security check -- Ensure the user-originating redirection URL is
            # safe.
            if not url_is_safe:
                next_page = self.request.path
        return next_page

    def get_context_data(self, **kwargs):
        context = super(LogoutView, self).get_context_data(**kwargs)
        current_site = get_current_site(self.request)
        context.update({
            'site': current_site,
            'site_name': current_site.name,
            'title': _('Logged out'),
        })
        if self.extra_context is not None:
            context.update(self.extra_context)
        return context
@deprecate_current_app
def logout(request, *args, **kwargs):
    # Deprecated function-based wrapper; delegates to LogoutView.
    warnings.warn(
        'The logout() view is superseded by the class-based LogoutView().',
        RemovedInDjango21Warning, stacklevel=2
    )
    return LogoutView.as_view(**kwargs)(request, *args, **kwargs)
_sentinel = object()
@deprecate_current_app
def logout_then_login(request, login_url=None, extra_context=_sentinel):
    """
    Logs out the user if they are logged in. Then redirects to the log-in page.
    """
    # _sentinel distinguishes "argument omitted" from an explicit None,
    # so only real callers of the deprecated parameter get warned.
    if extra_context is not _sentinel:
        warnings.warn(
            "The unused `extra_context` parameter to `logout_then_login` "
            "is deprecated.", RemovedInDjango21Warning
        )

    if not login_url:
        login_url = settings.LOGIN_URL
    login_url = resolve_url(login_url)
    return LogoutView.as_view(next_page=login_url)(request)
def redirect_to_login(next, login_url=None,
                      redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Redirects the user to the login page, passing the given 'next' page
    """
    target = resolve_url(login_url or settings.LOGIN_URL)
    scheme, netloc, path, params, query, fragment = urlparse(target)
    if redirect_field_name:
        # Append ?<field>=<next> to whatever query string the login URL has.
        qs = QueryDict(query, mutable=True)
        qs[redirect_field_name] = next
        query = qs.urlencode(safe='/')
    return HttpResponseRedirect(
        urlunparse((scheme, netloc, path, params, query, fragment)))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
@deprecate_current_app
@csrf_protect
def password_reset(request,
                   template_name='registration/password_reset_form.html',
                   email_template_name='registration/password_reset_email.html',
                   subject_template_name='registration/password_reset_subject.txt',
                   password_reset_form=PasswordResetForm,
                   token_generator=default_token_generator,
                   post_reset_redirect=None,
                   from_email=None,
                   extra_context=None,
                   html_email_template_name=None,
                   extra_email_context=None):
    # Deprecated function-based view; see PasswordResetView.
    warnings.warn("The password_reset() view is superseded by the "
                  "class-based PasswordResetView().",
                  RemovedInDjango21Warning, stacklevel=2)
    if post_reset_redirect is None:
        post_reset_redirect = reverse('password_reset_done')
    else:
        post_reset_redirect = resolve_url(post_reset_redirect)
    if request.method == "POST":
        form = password_reset_form(request.POST)
        if form.is_valid():
            # form.save() sends the reset email(s).
            opts = {
                'use_https': request.is_secure(),
                'token_generator': token_generator,
                'from_email': from_email,
                'email_template_name': email_template_name,
                'subject_template_name': subject_template_name,
                'request': request,
                'html_email_template_name': html_email_template_name,
                'extra_email_context': extra_email_context,
            }
            form.save(**opts)
            return HttpResponseRedirect(post_reset_redirect)
        # Invalid POST falls through and re-renders the bound form.
    else:
        form = password_reset_form()
    context = {
        'form': form,
        'title': _('Password reset'),
    }
    if extra_context is not None:
        context.update(extra_context)

    return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_done(request,
                        template_name='registration/password_reset_done.html',
                        extra_context=None):
    # Deprecated function-based view; see PasswordResetDoneView.
    warnings.warn("The password_reset_done() view is superseded by the "
                  "class-based PasswordResetDoneView().",
                  RemovedInDjango21Warning, stacklevel=2)
    context = {
        'title': _('Password reset sent'),
    }
    if extra_context is not None:
        context.update(extra_context)

    return TemplateResponse(request, template_name, context)
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
@deprecate_current_app
def password_reset_confirm(request, uidb64=None, token=None,
                           template_name='registration/password_reset_confirm.html',
                           token_generator=default_token_generator,
                           set_password_form=SetPasswordForm,
                           post_reset_redirect=None,
                           extra_context=None):
    """
    View that checks the hash in a password reset link and presents a
    form for entering a new password.
    """
    # Deprecated function-based view; see PasswordResetConfirmView.
    warnings.warn("The password_reset_confirm() view is superseded by the "
                  "class-based PasswordResetConfirmView().",
                  RemovedInDjango21Warning, stacklevel=2)
    UserModel = get_user_model()
    assert uidb64 is not None and token is not None  # checked by URLconf
    if post_reset_redirect is None:
        post_reset_redirect = reverse('password_reset_complete')
    else:
        post_reset_redirect = resolve_url(post_reset_redirect)
    try:
        # urlsafe_base64_decode() decodes to bytestring on Python 3
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = UserModel._default_manager.get(pk=uid)
    except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
        user = None

    # Only show the form when both the uid and the token check out.
    if user is not None and token_generator.check_token(user, token):
        validlink = True
        title = _('Enter new password')
        if request.method == 'POST':
            form = set_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(post_reset_redirect)
        else:
            form = set_password_form(user)
    else:
        validlink = False
        form = None
        title = _('Password reset unsuccessful')
    context = {
        'form': form,
        'title': title,
        'validlink': validlink,
    }
    if extra_context is not None:
        context.update(extra_context)

    return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_complete(request,
                            template_name='registration/password_reset_complete.html',
                            extra_context=None):
    # Deprecated function-based view; see PasswordResetCompleteView.
    warnings.warn("The password_reset_complete() view is superseded by the "
                  "class-based PasswordResetCompleteView().",
                  RemovedInDjango21Warning, stacklevel=2)
    context = {
        'login_url': resolve_url(settings.LOGIN_URL),
        'title': _('Password reset complete'),
    }
    if extra_context is not None:
        context.update(extra_context)

    return TemplateResponse(request, template_name, context)
# Class-based password reset views
# - PasswordResetView sends the mail
# - PasswordResetDoneView shows a success message for the above
# - PasswordResetConfirmView checks the link the user clicked and
# prompts for a new password
# - PasswordResetCompleteView shows a success message for the above
class PasswordContextMixin(object):
    """Adds ``title`` and any ``extra_context`` entries to the context."""
    extra_context = None

    def get_context_data(self, **kwargs):
        context = super(PasswordContextMixin, self).get_context_data(**kwargs)
        context['title'] = self.title
        # extra_context entries may override anything set above.
        context.update(self.extra_context or {})
        return context
class PasswordResetView(PasswordContextMixin, FormView):
    # Step 1 of the reset flow: collect an email address and send the link.
    email_template_name = 'registration/password_reset_email.html'
    extra_email_context = None
    form_class = PasswordResetForm
    from_email = None
    html_email_template_name = None
    subject_template_name = 'registration/password_reset_subject.txt'
    success_url = reverse_lazy('password_reset_done')
    template_name = 'registration/password_reset_form.html'
    title = _('Password reset')
    token_generator = default_token_generator

    @method_decorator(csrf_protect)
    def dispatch(self, *args, **kwargs):
        return super(PasswordResetView, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        # form.save() sends the reset email(s).
        opts = {
            'use_https': self.request.is_secure(),
            'token_generator': self.token_generator,
            'from_email': self.from_email,
            'email_template_name': self.email_template_name,
            'subject_template_name': self.subject_template_name,
            'request': self.request,
            'html_email_template_name': self.html_email_template_name,
            'extra_email_context': self.extra_email_context,
        }
        form.save(**opts)
        return super(PasswordResetView, self).form_valid(form)
class PasswordResetDoneView(PasswordContextMixin, TemplateView):
    # Step 2: confirmation page shown after the reset email was sent.
    template_name = 'registration/password_reset_done.html'
    title = _('Password reset sent')
class PasswordResetConfirmView(PasswordContextMixin, FormView):
    # Step 3: validate the emailed link and let the user pick a new password.
    form_class = SetPasswordForm
    post_reset_login = False
    success_url = reverse_lazy('password_reset_complete')
    template_name = 'registration/password_reset_confirm.html'
    title = _('Enter new password')
    token_generator = default_token_generator

    @method_decorator(sensitive_post_parameters())
    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        # Both parts of the link must be present in the URLconf.
        assert 'uidb64' in kwargs and 'token' in kwargs
        return super(PasswordResetConfirmView, self).dispatch(*args, **kwargs)

    def get_user(self, uidb64):
        # Returns None for any malformed uid or unknown user.
        UserModel = get_user_model()
        try:
            # urlsafe_base64_decode() decodes to bytestring on Python 3
            uid = force_text(urlsafe_base64_decode(uidb64))
            user = UserModel._default_manager.get(pk=uid)
        except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
            user = None
        return user

    def get_form_kwargs(self):
        kwargs = super(PasswordResetConfirmView, self).get_form_kwargs()
        kwargs['user'] = self.get_user(self.kwargs['uidb64'])
        return kwargs

    def form_valid(self, form):
        user = form.save()
        if self.post_reset_login:
            auth_login(self.request, user)
        return super(PasswordResetConfirmView, self).form_valid(form)

    def get_context_data(self, **kwargs):
        context = super(PasswordResetConfirmView, self).get_context_data(**kwargs)
        user = context['form'].user
        # The form is hidden entirely when the uid/token pair is invalid.
        if user is not None and self.token_generator.check_token(user, self.kwargs['token']):
            context['validlink'] = True
        else:
            context.update({
                'form': None,
                'title': _('Password reset unsuccessful'),
                'validlink': False,
            })
        return context
class PasswordResetCompleteView(PasswordContextMixin, TemplateView):
    # Step 4: final confirmation page, linking back to the login page.
    template_name = 'registration/password_reset_complete.html'
    title = _('Password reset complete')

    def get_context_data(self, **kwargs):
        context = super(PasswordResetCompleteView, self).get_context_data(**kwargs)
        context['login_url'] = resolve_url(settings.LOGIN_URL)
        return context
@sensitive_post_parameters()
@csrf_protect
@login_required
@deprecate_current_app
def password_change(request,
                    template_name='registration/password_change_form.html',
                    post_change_redirect=None,
                    password_change_form=PasswordChangeForm,
                    extra_context=None):
    # Deprecated function-based view; see PasswordChangeView.
    warnings.warn("The password_change() view is superseded by the "
                  "class-based PasswordChangeView().",
                  RemovedInDjango21Warning, stacklevel=2)
    if post_change_redirect is None:
        post_change_redirect = reverse('password_change_done')
    else:
        post_change_redirect = resolve_url(post_change_redirect)
    if request.method == "POST":
        form = password_change_form(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            # Updating the password logs out all other sessions for the user
            # except the current one.
            update_session_auth_hash(request, form.user)
            return HttpResponseRedirect(post_change_redirect)
    else:
        form = password_change_form(user=request.user)
    context = {
        'form': form,
        'title': _('Password change'),
    }
    if extra_context is not None:
        context.update(extra_context)

    return TemplateResponse(request, template_name, context)
@login_required
@deprecate_current_app
def password_change_done(request,
                         template_name='registration/password_change_done.html',
                         extra_context=None):
    # Deprecated function-based view; see PasswordChangeDoneView.
    warnings.warn("The password_change_done() view is superseded by the "
                  "class-based PasswordChangeDoneView().",
                  RemovedInDjango21Warning, stacklevel=2)
    context = {
        'title': _('Password change successful'),
    }
    if extra_context is not None:
        context.update(extra_context)

    return TemplateResponse(request, template_name, context)
class PasswordChangeView(PasswordContextMixin, FormView):
    form_class = PasswordChangeForm
    success_url = reverse_lazy('password_change_done')
    template_name = 'registration/password_change_form.html'
    title = _('Password change')

    @method_decorator(sensitive_post_parameters())
    @method_decorator(csrf_protect)
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(PasswordChangeView, self).dispatch(*args, **kwargs)

    def get_form_kwargs(self):
        # PasswordChangeForm needs the user to verify the old password.
        kwargs = super(PasswordChangeView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs

    def form_valid(self, form):
        form.save()
        # Updating the password logs out all other sessions for the user
        # except the current one.
        update_session_auth_hash(self.request, form.user)
        return super(PasswordChangeView, self).form_valid(form)
class PasswordChangeDoneView(PasswordContextMixin, TemplateView):
    # Confirmation page shown after a successful password change.
    template_name = 'registration/password_change_done.html'
    title = _('Password change successful')

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(PasswordChangeDoneView, self).dispatch(*args, **kwargs)
|
stanford-mast/nn_dataflow | refs/heads/master | nn_dataflow/tests/tool_test/test_nn_dataflow_search.py | 1 | """ $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import unittest
import os
import subprocess
class TestNNDataflowSearch(unittest.TestCase):
''' Tests for NN dataflow search tool. '''
def setUp(self):
cwd = os.path.dirname(os.path.abspath(__file__))
self.cwd = os.path.join(cwd, '..', '..', '..')
self.assertTrue(os.path.isdir(self.cwd))
self.assertTrue(os.path.isdir(
os.path.join(self.cwd, 'nn_dataflow', 'tools')))
self.args = ['python3', '-m', 'nn_dataflow.tools.nn_dataflow_search',
'alex_net', '--batch', '1',
'--node', '1', '1', '--array', '16', '16',
'--regf', '512', '--gbuf', '131072']
def test_default_invoke(self):
''' Default invoke. '''
ret = self._call(self.args)
self.assertEqual(ret, 0)
def test_3d_mem(self):
''' With 3D memory. '''
ret = self._call(self.args + ['--mem-type', '3D'])
self.assertEqual(ret, 0)
def test_no_dataflow(self):
''' No dataflow scheme found. '''
args = self.args[:]
args[args.index('--gbuf') + 1] = '2'
args += ['--disable-bypass', 'i', 'o', 'f']
ret = self._call(args)
self.assertEqual(ret, 2)
def _call(self, args):
with open(os.devnull, 'w') as output:
result = subprocess.call(args, cwd=self.cwd,
stderr=subprocess.STDOUT,
stdout=output)
return result
|
gordon1992/BallGame | refs/heads/master | src/mainGame.py | 1 | '''
Project was conceptualised formally on the 3rd of November 2010
Last updated 12th of January 2011 by Gordon Reid
@title: Ball Game
@author: Gordon Reid - gordon.reid1992@hotmail.co.uk
This code is freely available for anybody to view and edit.
I request that if any code is used that credit is given in an appropriate
fashion (a.k.a my name mentioned in a section similar to this and visible
in the user interface).
Disclaimer:
I, me (and similar) all reference to Gordon Reid
(gordon.reid1992@hotmail.co.uk) unless explicitly stated.
I have not intentionally stolen code, this is all my own work and as a result
the property (programming projects) are solely owned by me.
I accept no responsibility for any damages which may occur from the code.
The code has been tested on my own computer systems running
Ubuntu 10.04 Netbook Edition and Ubuntu 10.10 Desktop Edition x64 without
causing any damage (to files or otherwise).
The code is run at the users own risk.
No support on the use of any of the projects will be available.
'''
try:
# Python 3.x
from tkinter import * # @UnusedWildImport
from tkinter.messagebox import showerror
except ImportError:
# Python 2.x
from Tkinter import * # @UnusedWildImport
from tkMessageBox import showerror
from os import listdir
from random import randrange
from traceback import print_exc
# Module-level game state, mutated via `global` statements in ballGame()
# and drawLine().
bonusTrigger = False  # reset in ballGame(); appears otherwise unused here
count = 0  # number of lines cleared so far; drives level and powerup timing
# Pixel bounds of the playable region inside the 800x500 canvas.
xMin = 10
yMin = 0
xMax = 790
yMax = 370
class ball():
    """A player token: geometry, movement state, score and powerup data."""

    def __init__(self):
        # Geometry: top-left corner of the bounding box plus its size.
        self.diameter = 20
        self.xPos = 20
        self.yPos = 20
        # Movement: current horizontal speed, the speed to restore once a
        # powerup expires, and the currently held direction keys.
        self.speed = 8
        self.oldSpeed = 8
        self.moveLeft = False
        self.moveRight = False
        # Game state flags and bookkeeping.
        self.hitTop = False
        self.isHuman = True
        self.isPlaying = False
        self.score = 0
        self.userName = ""
        # Active powerup description and its remaining time (milliseconds).
        self.powerup = None
        self.powerupTime = 0
def ballGame(computerPlaying, computerLevel, userName, canvas, mainGameWindow):
    """Run one full game session on *canvas*, then hand off to gameOver().

    computerPlaying -- True when a computer-controlled second ball joins.
    computerLevel -- 'alone', 'easy', 'medium', 'difficult' or 'uber'.
    userName -- name recorded with the final score; the special name
                'TechDemo' turns player one into a self-playing demo.
    canvas, mainGameWindow -- Tk widgets created by play().
    """
    # Set starting variables and decide which picture to show as
    # the game background (bonusTrigger and count here just in case
    # user quit game midway last time).
    global bonusTrigger, count
    bonusTrigger = False
    success = True
    count = 0
    level = 0
    try:
        dirList = listdir("backgrounds/")
    except Exception:
        success = False
        print_exc(file=open("errlog.txt", "a"))
        showerror("Wrong directory", "You appear to have forgotten to cd to " \
        + "the correct directory. A traceback has been written to " \
        + "errlog.txt in the program's directory.")
    if success:
        photos = []
        # Find *.gif files in background directory
        for element in dirList:
            if str(element)[-4:] == ".gif":
                photos += [element]
        randomNumber = randrange(0, len(photos))
        whichPicture = "backgrounds/" + str(photos[randomNumber])
        try:
            photo1 = PhotoImage(file=whichPicture)
            canvas.create_image((xMin + xMax) / 2, \
                    (yMin + yMax) / 2, image=photo1)
        except Exception:
            print_exc(file=open("errlog.txt", "a"))
            showerror("Error - Failed to load image", "I could not load the " \
            + "background images for the game environment. The game will " \
            + "still load, just with a blank background. A traceback has " \
            + "been written to errlog.txt in the program's directory.")
    # Define specific values for each ball and make the ball objects global
    # (for call back only, otherwise pass variables in).
    global playerOne, playerTwo
    playerOne = ball()
    playerOne.isPlaying = True
    playerOne.userName = userName
    playerTwo = ball()
    if computerPlaying == True:
        playerTwo.isHuman = False
        playerTwo.isPlaying = True
        # Difficulty maps directly to the computer ball's horizontal speed.
        if computerLevel == "easy":
            playerTwo.speed = 2
            playerTwo.oldSpeed = 2
        elif computerLevel == "medium":
            playerTwo.speed = 4
            playerTwo.oldSpeed = 4
        elif computerLevel == "difficult":
            playerTwo.speed = 6
            playerTwo.oldSpeed = 6
        elif computerLevel == "uber":
            playerTwo.speed = 8
            playerTwo.oldSpeed = 8
    # Define specific values for each ball if the user name is "TechDemo".
    if playerOne.userName == "TechDemo":
        playerOne.speed = 15
        playerOne.oldSpeed = 15
        playerOne.isHuman = False
        if computerLevel == "alone":
            playerTwo.isPlaying = False
        else:
            playerTwo.isHuman = False
            playerTwo.isPlaying = True
    # Display initial score and level.
    playerOneText = ("Score " + str(playerOne.score) \
    + " and on level " + str(level))
    theFont = "Calibri", 12
    playerOneInformation = canvas.create_text(400, 385, \
    text=playerOneText, font=theFont)
    powerupText = "You currently have no powerups."
    powerupInformation = canvas.create_text(400, 405, \
    text=powerupText, font=theFont)
    # Display count down from 3 prior to starting the game.
    massiveFont = "Calibri", 200
    for number in range(3):
        counter = canvas.create_text(390, 185, text=str(3 - number), \
        font=massiveFont, fill="white")
        canvas.update()
        canvas.after(1000)
        canvas.delete(counter)
    canvas.delete(playerOneInformation)
    canvas.delete(powerupInformation)
    # Keep playing the game until one of the balls in
    # play hits the top of the game area.
    while playerOne.hitTop == False and playerTwo.hitTop == False:
        playerOne, playerTwo, level = drawLine(playerOne, \
        playerTwo, canvas, level)
    gameOver(computerLevel, playerOne, playerTwo, computerPlaying, \
    canvas, level, mainGameWindow)
def gameOver(computerLevel, playerOne, playerTwo, computerPlaying, \
    canvas, level, mainGameWindow):
    """Show the end-of-game message for five seconds, then destroy the
    canvas and record the final score via writeScore()."""
    theFont = "Calibri", 12
    # If player one lost against player two:
    if playerOne.hitTop == True and playerTwo.hitTop == False \
    and playerTwo.isPlaying == True:
        # The two branches differ only in the article ('an easy/uber'
        # versus 'a medium/difficult') used in the message.
        if computerLevel == "easy" or computerLevel == "uber":
            canvas.create_text(400, 385, text=("Game over, you lost. " \
            + "Your final score is " + str(playerOne.score) \
            + ". You reached level " + str(level) + " against an " \
            + str(computerLevel) + " computer."), width=780, font=theFont)
        elif computerLevel == "medium" or computerLevel == "difficult":
            canvas.create_text(400, 385, text=("Game over, you lost. " \
            + "Your final score is " + str(playerOne.score) \
            + ". You reached level " + str(level) + " against a " \
            + str(computerLevel) + " computer."), width=780, font=theFont)
    # If only player one was playing:
    if playerOne.hitTop == True and computerLevel == "alone":
        canvas.create_text(400, 385, text=("Game over. Your final score is " \
        + str(playerOne.score) + ". You reached a level of " \
        + str(level) + "."), width=780, font=theFont)
    # If player two lost
    if playerTwo.hitTop == True:
        # Beating the computer earns a flat 20000-point bonus.
        playerOne.score += 20000
        canvas.create_text(400, 385, text=("Game over, you won! " \
        + "Your final score is " + str(playerOne.score) \
        + ". You reached level " + str(level) + "."), width=780, font=theFont)
    # Show the newly created text for five seconds then
    # destroy the window and save the score.
    canvas.update()
    canvas.after(5000)
    canvas.destroy()
    writeScore(playerOne, computerLevel, computerPlaying, mainGameWindow)
def writeScore(playerOne, computerLevel, computerPlaying, mainGameWindow):
    """Append the player's final score to the scores file, then close
    the game window.

    Each record is one line of the form ``name@score@opponent`` where
    *opponent* is 'alone' for a solo game, otherwise the computer's
    difficulty level.
    """
    filename = "resources/scores.txt"
    # 'alone' marks a solo game; every other case records the level string
    # as-is (the original easy/uber and medium/difficult branches built
    # identical strings, so one branch suffices).
    if computerPlaying == False:
        opponent = "alone"
    else:
        opponent = str(computerLevel)
    nameAndScore = str(playerOne.userName) + "@" + str(playerOne.score) \
        + "@" + opponent + "\n"
    # Append the record; the context manager guarantees the file is closed
    # even if the write fails part way through.
    try:
        with open(filename, "a") as file:
            file.write(nameAndScore)
    except Exception:
        print_exc(file=open("errlog.txt", "a"))
        showerror("Error - Failed to write score", "I could not write your " \
            + "score to the scores file. A traceback has been written to " \
            + "errlog.txt in the program's directory.")
    mainGameWindow.destroy()
def drawLine(playerOne, playerTwo, canvas, level):
    """Animate one rising line (one pass of play) on *canvas*.

    Each frame: move both balls, draw the line with a random gap, tick
    powerup timers, update score and HUD text.  Returns the (mutated)
    players and current level once the line leaves the top of the play
    area or one of the balls has hit the top.
    """
    # Set values for count, starting yPos for line and
    # xPos for the gap in the line.
    global count
    count = count + 1
    yLine = yMax
    gapX = randrange(xMin, xMax - (3 * playerOne.diameter), playerOne.diameter)
    # Frame delay in milliseconds (about 120 updates per second).
    interval = int(1000 / 120)
    # Power up on new level if computer is playing.
    if count > 0 and count % 5 == 0 and playerOne.powerupTime == 0:
        playerOne = findPowerup(playerOne, playerTwo)
    elif count > 0 and count % 5 == 0 and playerOne.powerupTime != 0:
        playerOne.powerupTime += 1000
    # Until the line goes off the top of the screen:
    while yLine > yMin:
        level = int(count / 5) + 1
        # Call appropriate function based on whether or not
        # the player is human or computer.
        if playerOne.isPlaying == True and playerOne.isHuman == True:
            playerOne = humanMove(playerOne, yLine, gapX)
        if playerOne.isPlaying == True and playerOne.isHuman == False:
            playerOne = computerMove(playerOne, yLine, gapX)
        if playerTwo.isPlaying == True and playerTwo.isHuman == True:
            playerTwo = humanMove(playerTwo, yLine, gapX)
        if playerTwo.isPlaying == True and playerTwo.isHuman == False:
            playerTwo = computerMove(playerTwo, yLine, gapX)
        # If both balls are at the bottom of the game area and are fully
        # overlapping, move player one by half a diameter to the right.
        if playerOne.xPos == playerTwo.xPos \
        and playerOne.yPos + playerOne.diameter == yMax \
        and playerTwo.yPos + playerTwo.diameter == yMax:
            playerOne.xPos += int(playerOne.diameter / 2)
        # Limit the speed the line moves up at to 15px per cycle.
        if level > 15:
            level = 15
        # Reduce time left for powerup.
        if playerOne.powerupTime >= 0:
            playerOne.powerupTime -= interval
        # When powerup has finished, go back to old speeds.
        if playerOne.powerupTime < 0:
            playerOne.powerupTime = 0
            playerOne.powerup = None
            playerOne.speed = playerOne.oldSpeed
            playerTwo.speed = playerTwo.oldSpeed
        # Create image of the player(s) ball(s) if they are playing.
        if playerOne.isPlaying == True:
            ball = canvas.create_oval(playerOne.xPos, playerOne.yPos, \
            playerOne.xPos + playerOne.diameter, \
            playerOne.yPos + playerOne.diameter, fill="red")
        if playerTwo.isPlaying == True:
            ball2 = canvas.create_oval(playerTwo.xPos, playerTwo.yPos, \
            playerTwo.xPos + playerTwo.diameter, \
            playerTwo.yPos + playerTwo.diameter, fill="blue")
        # Player one receives a score bonus if there ball is lower
        # down the game area than player two.
        if playerOne.isPlaying == True and playerTwo.isPlaying == True \
        and playerOne.yPos > playerTwo.yPos:
            playerOne.score += level
        # Create both sections of the line.
        line = canvas.create_line(xMin, yLine, gapX, yLine, fill="white", \
        width=6, stipple='questhead')
        line2 = canvas.create_line(gapX + 3 * playerOne.diameter, yLine, \
        xMax, yLine, fill="white", width=6, stipple='questhead')
        # Display player one's score and how fast the line is moving.
        theFont = "Calibri", 12
        playerOneText = ("Score " + str(playerOne.score) \
        + " and on level " + str(level))
        playerOneInformation = canvas.create_text(400, 385, \
        text=playerOneText, font=theFont)
        # Tell player if they have a powerup
        if playerOne.powerupTime != 0:
            time = int(playerOne.powerupTime / 1000) + 1
        else:
            time = 0
        if playerOne.powerup != None and time != 1:
            powerupText = ("You currently have the powerup: " \
            + str(playerOne.powerup) + " for " + str(time) + " seconds.")
            powerupInformation = canvas.create_text(400, 405, \
            text=powerupText, font=theFont)
        elif time == 1:
            # Singular wording when exactly one second remains.
            powerupText = ("You currently have the powerup: " \
            + str(playerOne.powerup) + " for " + str(time) + " second.")
            powerupInformation = canvas.create_text(400, 405, \
            text=powerupText, font=theFont)
        else:
            powerupText = "You currently have no powerups."
            powerupInformation = canvas.create_text(400, 405, \
            text=powerupText, font=theFont)
        canvas.update()
        canvas.after(interval)
        playerOne.score += level
        yLine -= int(level / 2) + 1
        # Delete all objects created in this iteration of the loop.
        canvas.delete(line)
        canvas.delete(line2)
        canvas.delete(playerOneInformation)
        canvas.delete(powerupInformation)
        if playerOne.isPlaying == True:
            canvas.delete(ball)
        if playerTwo.isPlaying == True:
            canvas.delete(ball2)
        # If either the line or one of the balls has gone off the
        # top of the game area, quit the loop.
        if yLine <= yMin or playerOne.hitTop == True or \
        playerTwo.hitTop == True:
            return playerOne, playerTwo, level
def findPowerup(playerOne, playerTwo):
    """Pick a random powerup for *playerOne* from resources/powerups.txt.

    Each line of the file holds one float multiplier: a value > 1 speeds
    the player up, < 1 slows the computer (playerTwo) down, and == 1
    grants a flat 1000-point score bonus.  Returns the updated playerOne
    (unchanged when no powerups could be loaded).
    """
    filename = "resources/powerups.txt"
    powerupsAvailable = []
    try:
        # float() ignores surrounding whitespace, so this also parses a
        # final line without a trailing newline (the old line[:-1] slice
        # would have chopped off its last digit).
        with open(filename, "r") as file:
            for line in file:
                powerupsAvailable += [float(line)]
    except Exception:
        print_exc(file=open("errlog.txt", "a"))
        showerror("Error - Failed to load powerups", "I could not load the " \
        + "powerups because powerups.txt could not be found. A traceback has "\
        + "been written to errlog.txt in the program's directory.")
    # With no powerups there is nothing to choose from; return the player
    # unchanged instead of crashing in randrange(0, 0).
    if not powerupsAvailable:
        return playerOne
    # Obtain a random powerup
    number = randrange(0, len(powerupsAvailable))
    playerOne.powerup = powerupsAvailable[number]
    # If the value would result in a speed increase, give it to the user.
    if playerOne.powerup > 1:
        playerOne.speed = int(playerOne.speed * playerOne.powerup)
        playerOne.powerupTime = 5000 * (playerOne.powerup - 1)
        playerOne.powerup = "speed you up to " \
        + str(powerupsAvailable[number]) + "x speed"
    # If the value would result in a speed decrease, give it to the computer.
    elif playerOne.powerup < 1 and playerTwo.isPlaying == True:
        playerTwo.speed = int(playerTwo.speed * playerOne.powerup)
        playerOne.powerupTime = 5000 * playerOne.powerup
        playerOne.powerup = "slow computer down to "\
        + str(powerupsAvailable[number]) + "x speed"
    # If the value wouldn't result in a speed change, add 1000 to user's score.
    elif playerOne.powerup == 1:
        playerOne.score += 1000
        playerOne.powerupTime = 0
        playerOne.powerup = "score bonus of 1000"
    return playerOne
def humanMove(ball, yLine, gapX):
    """Apply one frame of physics to *ball*: gravity, key-driven movement,
    play-area boundaries and collision with the rising line.

    yLine -- y coordinate of the line this frame; gapX -- left edge of the
    gap in it.  Mutates and returns *ball*; sets ball.hitTop when the ball
    is pushed off the top of the play area (game over for that ball).
    The checks below are order-dependent: gravity, left wall, movement,
    right wall, top, line collision, bottom.
    """
    # Let the ball fall down by 10px per cycle.
    ball.yPos += 10
    # If ball is off the left side of the game area then move it back inside.
    if ball.xPos < xMin:
        ball.xPos = xMin
        ball.moveLeft = False
    # Apply whichever direction key is currently held (set by callback()).
    if ball.moveLeft and ball.isHuman and ball.isPlaying:
        ball.xPos -= ball.speed
    if ball.moveRight and ball.isHuman and ball.isPlaying:
        ball.xPos += ball.speed
    # If ball is off the right side of the game area then move it back inside.
    if ball.xPos + ball.diameter > xMax:
        ball.xPos = xMax - ball.diameter
        ball.moveRight = False
    # If ball is off the top side of the game area then game is over.
    if ball.yPos <= yMin:
        ball.hitTop = True
    # If ball is not over the gap then make it sit on the line
    # (unless below the line by more than 15px).
    if ball.yPos + ball.diameter >= yLine - 3 \
    and ball.yPos + ball.diameter <= yLine + 15 \
    and (ball.xPos < gapX \
    or ball.xPos + ball.diameter > (gapX + (3 * ball.diameter))):
        ball.yPos = yLine - ball.diameter - 3
    # If ball is off the bottom of the game area then move it back inside.
    if ball.yPos + ball.diameter >= yMax:
        ball.yPos = yMax - ball.diameter
    return ball
def computerMove(ball, yLine, gapX):
    """Apply the shared frame physics, then steer a computer ball toward
    the gap whose left edge sits at *gapX*."""
    # Gravity, boundaries and line collision are identical for human and
    # computer balls, so reuse the human handler first.
    ball = humanMove(ball, yLine, gapX)
    # Steer toward the gap: nudge right while still left of its left edge,
    # nudge left while overlapping its right edge.
    if ball.xPos <= gapX:
        ball.xPos = ball.xPos + ball.speed
    gapRight = gapX + 3 * ball.diameter
    if ball.xPos + ball.diameter >= gapRight:
        ball.xPos = ball.xPos - ball.speed
    return ball
def callback(event):
    """Translate arrow-key presses into movement flags on the global
    playerOne ball (bound to all key events by play())."""
    # Only a human-controlled ball reacts to the keyboard.
    if not playerOne.isHuman:
        return
    key = event.keysym
    if key == "Left":
        playerOne.moveLeft = True
        playerOne.moveRight = False
    elif key == "Right":
        playerOne.moveLeft = False
        playerOne.moveRight = True
    elif key == "Down":
        # Down acts as a brake: stop all horizontal movement.
        playerOne.moveLeft = False
        playerOne.moveRight = False
def play(userName, computerPlaying, computerLevel):
    """Create the fixed-size game window and canvas, bind the keyboard to
    callback(), and start a game session with the given options."""
    # Define details for the window (size and menu options).
    global mainGameWindow
    menuFont = "Calibri", 12
    mainGameWindow = Toplevel(takefocus=True)
    mainGameWindow.title('Ball Game - Main Game')
    # Equal min and max size plus resizable(0, 0) pins the window at 800x500.
    mainGameWindow.minsize(800, 500)
    mainGameWindow.maxsize(800, 500)
    mainGameWindow.geometry = mainGameWindow.minsize
    mainGameWindow.resizable(0, 0)
    menubar = Menu(mainGameWindow)
    menubar.add_command(label="Quit", command=mainGameWindow.destroy, \
    font=menuFont)
    mainGameWindow.config(menu=menubar)
    # Define details for the canvas widget which sits inside the
    # window (size and which function to call on key press).
    canvas = Canvas(mainGameWindow, width=800, height=500,)
    canvas.bind_all('<Key>', callback)
    canvas.pack()
    ballGame(computerPlaying, computerLevel, userName, canvas, mainGameWindow)
|
youprofit/scikit-image | refs/heads/master | skimage/transform/pyramids.py | 20 | import math
import numpy as np
from scipy import ndimage as ndi
from ..transform import resize
from ..util import img_as_float
def _smooth(image, sigma, mode, cval):
"""Return image with each channel smoothed by the Gaussian filter."""
smoothed = np.empty(image.shape, dtype=np.double)
# apply Gaussian filter to all dimensions independently
if image.ndim == 3:
for dim in range(image.shape[2]):
ndi.gaussian_filter(image[..., dim], sigma,
output=smoothed[..., dim],
mode=mode, cval=cval)
else:
ndi.gaussian_filter(image, sigma, output=smoothed,
mode=mode, cval=cval)
return smoothed
def _check_factor(factor):
if factor <= 1:
raise ValueError('scale factor must be greater than 1')
def pyramid_reduce(image, downscale=2, sigma=None, order=1,
                   mode='reflect', cval=0):
    """Smooth ``image`` with a Gaussian, then shrink it by ``downscale``.

    Parameters
    ----------
    image : array
        Input image.
    downscale : float, optional
        Factor by which each spatial dimension is reduced (must be > 1).
    sigma : float, optional
        Gaussian width. Defaults to ``2 * downscale / 6.0``, i.e. a filter
        mask twice the scale factor, covering more than 99% of the
        Gaussian distribution.
    order : int, optional
        Spline order used by the resize interpolation; see
        `skimage.transform.warp`.
    mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
        Border handling; ``cval`` fills the border when mode is 'constant'.
    cval : float, optional
        Border fill value for mode 'constant'.

    Returns
    -------
    out : array
        Smoothed and downsampled float image.

    References
    ----------
    .. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
    """
    _check_factor(downscale)

    image = img_as_float(image)

    if sigma is None:
        # Cover > 99% of the distribution with a mask twice the factor.
        sigma = 2 * downscale / 6.0

    out_shape = (math.ceil(image.shape[0] / float(downscale)),
                 math.ceil(image.shape[1] / float(downscale)))
    blurred = _smooth(image, sigma, mode, cval)
    return resize(blurred, out_shape, order=order, mode=mode, cval=cval)
def pyramid_expand(image, upscale=2, sigma=None, order=1,
                   mode='reflect', cval=0):
    """Enlarge ``image`` by ``upscale``, then Gaussian-smooth the result.

    Parameters
    ----------
    image : array
        Input image.
    upscale : float, optional
        Factor by which each spatial dimension grows (must be > 1).
    sigma : float, optional
        Gaussian width. Defaults to ``2 * upscale / 6.0``, i.e. a filter
        mask twice the scale factor, covering more than 99% of the
        Gaussian distribution.
    order : int, optional
        Spline order used by the resize interpolation; see
        `skimage.transform.warp`.
    mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
        Border handling; ``cval`` fills the border when mode is 'constant'.
    cval : float, optional
        Border fill value for mode 'constant'.

    Returns
    -------
    out : array
        Upsampled and smoothed float image.

    References
    ----------
    .. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
    """
    _check_factor(upscale)

    image = img_as_float(image)

    if sigma is None:
        # Cover > 99% of the distribution with a mask twice the factor.
        sigma = 2 * upscale / 6.0

    out_shape = (math.ceil(upscale * image.shape[0]),
                 math.ceil(upscale * image.shape[1]))
    enlarged = resize(image, out_shape, order=order, mode=mode, cval=cval)
    return _smooth(enlarged, sigma, mode, cval)
def pyramid_gaussian(image, max_layer=-1, downscale=2, sigma=None, order=1,
                     mode='reflect', cval=0):
    """Yield the layers of the Gaussian pyramid of ``image``.

    Layer 0 is the (float-converted) input itself; every further layer is
    produced by applying `pyramid_reduce` to the previous one.  Generation
    stops after ``max_layer`` reductions (``-1`` means unlimited) or as
    soon as a reduction no longer changes the image shape, so at most
    ``max_layer + 1`` images are yielded.

    Parameters
    ----------
    image : array
        Input image.
    max_layer : int
        Number of pyramid layers beyond the original; -1 builds all
        possible layers.
    downscale : float, optional
        Downscale factor (must be > 1).
    sigma : float, optional
        Gaussian width; defaults to ``2 * downscale / 6.0`` (covers > 99%
        of the distribution).
    order : int, optional
        Spline order for the resize interpolation.
    mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
        Border handling; ``cval`` fills the border when mode is 'constant'.
    cval : float, optional
        Border fill value for mode 'constant'.

    Returns
    -------
    pyramid : generator
        Generator yielding pyramid layers as float images.

    References
    ----------
    .. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
    """
    _check_factor(downscale)

    # One float dtype for every layer of the pyramid.
    layer_image = img_as_float(image)
    yield layer_image

    layer = 0
    while layer != max_layer:
        layer += 1
        reduced = pyramid_reduce(layer_image, downscale, sigma, order,
                                 mode, cval)
        # Stop once reduction leaves the spatial shape unchanged.
        if reduced.shape[:2] == layer_image.shape[:2]:
            break
        layer_image = reduced
        yield layer_image
def pyramid_laplacian(image, max_layer=-1, downscale=2, sigma=None, order=1,
                      mode='reflect', cval=0):
    """Yield images of the laplacian pyramid formed by the input image.

    Each layer contains the difference between the downsampled and the
    downsampled, smoothed image::

        layer = resize(prev_layer) - smooth(resize(prev_layer))

    Note that the first image of the pyramid will be the difference between the
    original, unscaled image and its smoothed version. The total number of
    images is `max_layer + 1`. In case all layers are computed, the last image
    is either a one-pixel image or the image where the reduction does not
    change its shape.

    Parameters
    ----------
    image : array
        Input image.
    max_layer : int
        Number of layers for the pyramid. 0th layer is the original image.
        Default is -1 which builds all possible layers.
    downscale : float, optional
        Downscale factor.
    sigma : float, optional
        Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
        corresponds to a filter mask twice the size of the scale factor that
        covers more than 99% of the Gaussian distribution.
    order : int, optional
        Order of splines used in interpolation of downsampling. See
        `skimage.transform.warp` for detail.
    mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
        The mode parameter determines how the array borders are handled, where
        cval is the value when mode is equal to 'constant'.
    cval : float, optional
        Value to fill past edges of input if mode is 'constant'.

    Returns
    -------
    pyramid : generator
        Generator yielding pyramid layers as float images.

    References
    ----------
    .. [1] http://web.mit.edu/persci/people/adelson/pub_pdfs/pyramid83.pdf
    .. [2] http://sepwww.stanford.edu/~morgan/texturematch/paper_html/node3.html
    """
    _check_factor(downscale)

    # cast to float for consistent data type in pyramid
    image = img_as_float(image)

    if sigma is None:
        # automatically determine sigma which covers > 99% of distribution
        sigma = 2 * downscale / 6.0

    layer = 0
    rows = image.shape[0]
    cols = image.shape[1]

    smoothed_image = _smooth(image, sigma, mode, cval)
    # Layer 0: detail removed by smoothing the original image.
    yield image - smoothed_image

    # build downsampled images until max_layer is reached or downscale process
    # does not change image size
    while layer != max_layer:
        layer += 1

        out_rows = math.ceil(rows / float(downscale))
        out_cols = math.ceil(cols / float(downscale))

        # Shrink the previous smoothed image, smooth again, and yield the
        # detail lost by that second smoothing as the next layer.
        resized_image = resize(smoothed_image, (out_rows, out_cols),
                               order=order, mode=mode, cval=cval)
        smoothed_image = _smooth(resized_image, sigma, mode, cval)

        prev_rows = rows
        prev_cols = cols
        rows = resized_image.shape[0]
        cols = resized_image.shape[1]

        # no change to previous pyramid layer
        if prev_rows == rows and prev_cols == cols:
            break

        yield resized_image - smoothed_image
|
Xykon/pycom-micropython-sigfox | refs/heads/master | tests/basics/class3.py | 118 | # inheritance
class A:
def a():
print('A.a() called')
class B(A):
pass
print(type(A))
print(type(B))
print(issubclass(A, A))
print(issubclass(A, B))
print(issubclass(B, A))
print(issubclass(B, B))
print(isinstance(A(), A))
print(isinstance(A(), B))
print(isinstance(B(), A))
print(isinstance(B(), B))
A.a()
B.a()
|
ujenmr/ansible | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_host_acceptance.py | 48 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_acceptance
short_description: Manage the host acceptance level of an ESXi host
description:
- This module can be used to manage the host acceptance level of an ESXi host.
- The host acceptance level controls the acceptance level of each VIB on a ESXi host.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Acceptance level of all ESXi host system in the given cluster will be managed.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Acceptance level of this ESXi host system will be managed.
- If C(cluster_name) is not given, this parameter is required.
state:
description:
- Set or list acceptance level of the given ESXi host.
- 'If set to C(list), then will return current acceptance level of given host system/s.'
- If set to C(present), then will set given acceptance level.
choices: [ list, present ]
required: False
default: 'list'
acceptance_level:
description:
- Name of acceptance level.
- If set to C(partner), then accept only partner and VMware signed and certified VIBs.
- If set to C(vmware_certified), then accept only VIBs that are signed and certified by VMware.
- If set to C(vmware_accepted), then accept VIBs that have been accepted by VMware.
- If set to C(community), then accept all VIBs, even those that are not signed.
choices: [ community, partner, vmware_accepted, vmware_certified ]
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Set acceptance level to community for all ESXi Host in given Cluster
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
acceptance_level: 'community'
state: present
delegate_to: localhost
register: cluster_acceptance_level
- name: Set acceptance level to vmware_accepted for the given ESXi Host
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
acceptance_level: 'vmware_accepted'
state: present
delegate_to: localhost
register: host_acceptance_level
- name: Get acceptance level from the given ESXi Host
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: list
delegate_to: localhost
register: host_acceptance_level
'''
RETURN = r'''
facts:
description:
- dict with hostname as key and dict with acceptance level facts, error as value
returned: facts
type: dict
sample: { "facts": { "localhost.localdomain": { "error": "NA", "level": "vmware_certified" }}}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VMwareAccpetanceManager(PyVmomi):
    # NOTE(review): the class name misspells "Acceptance"; renaming would
    # break external references, so it is kept as-is.
    def __init__(self, module):
        # Resolve the target hosts (a whole cluster or one named ESXi
        # host) and cache the requested state/level from module params.
        super(VMwareAccpetanceManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        self.desired_state = self.params.get('state')
        self.hosts_facts = {}
        self.acceptance_level = self.params.get('acceptance_level')

    def gather_acceptance_facts(self):
        # Fill self.hosts_facts with each host's current acceptance level;
        # per-host faults are recorded instead of failing the module.
        for host in self.hosts:
            self.hosts_facts[host.name] = dict(level='', error='NA')
            host_image_config_mgr = host.configManager.imageConfigManager
            if host_image_config_mgr:
                try:
                    self.hosts_facts[host.name]['level'] = host_image_config_mgr.HostImageConfigGetAcceptance()
                except vim.fault.HostConfigFault as e:
                    self.hosts_facts[host.name]['error'] = to_native(e.msg)

    def set_acceptance_level(self):
        # Apply the requested level to every host whose current level
        # differs, then exit the module reporting whether anything changed.
        change = []
        for host in self.hosts:
            host_changed = False
            if self.hosts_facts[host.name]['level'] != self.acceptance_level:
                host_image_config_mgr = host.configManager.imageConfigManager
                if host_image_config_mgr:
                    try:
                        if self.module.check_mode:
                            # Check mode: report the would-be level only.
                            self.hosts_facts[host.name]['level'] = self.acceptance_level
                        else:
                            host_image_config_mgr.UpdateHostImageAcceptanceLevel(newAcceptanceLevel=self.acceptance_level)
                            self.hosts_facts[host.name]['level'] = host_image_config_mgr.HostImageConfigGetAcceptance()
                        host_changed = True
                    except vim.fault.HostConfigFault as e:
                        self.hosts_facts[host.name]['error'] = to_native(e.msg)

            change.append(host_changed)
        self.module.exit_json(changed=any(change), facts=self.hosts_facts)

    def check_acceptance_state(self):
        # Entry point: always gather facts; in 'list' state just report
        # them, otherwise go on to enforce the requested level.
        self.gather_acceptance_facts()
        if self.desired_state == 'list':
            self.module.exit_json(changed=False, facts=self.hosts_facts)
        self.set_acceptance_level()
def main():
    """Ansible module entry point: build the argument spec, validate the
    invocation, and run the acceptance-level manager."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        acceptance_level=dict(type='str',
                              choices=['community', 'partner', 'vmware_accepted', 'vmware_certified']
                              ),
        state=dict(type='str',
                   choices=['list', 'present'],
                   default='list'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        # A target is mandatory: either a whole cluster or one ESXi host.
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        # Setting a level only makes sense when one is supplied.
        required_if=[
            ['state', 'present', ['acceptance_level']],
        ],
        # Check mode is honored inside set_acceptance_level().
        supports_check_mode=True
    )

    vmware_host_accept_config = VMwareAccpetanceManager(module)
    vmware_host_accept_config.check_acceptance_state()


if __name__ == "__main__":
    main()
|
drxos/python-social-auth | refs/heads/master | examples/flask_example/routes/__init__.py | 63 | from flask_example.routes import main
from social.apps.flask_app import routes
|
jerbob92/CouchPotatoServer | refs/heads/master | couchpotato/core/providers/userscript/filmweb/main.py | 21 | from couchpotato.core.providers.userscript.base import UserscriptBase
import re
class Filmweb(UserscriptBase):
    """Userscript provider that resolves filmweb.pl film pages."""

    version = 2

    includes = ['http://www.filmweb.pl/film/*']

    def getMovie(self, url):
        """Scrape the title and year from a filmweb.pl page and search for it.

        Returns the result of ``self.search(name, year)``, or ``None`` when
        the page cannot be fetched or its markup cannot be parsed.
        """
        # Skip filmweb's welcome-screen interstitial.
        cookie = {'Cookie': 'welcomeScreen=welcome_screen'}

        try:
            data = self.urlopen(url, headers = cookie)
        except:
            # Best-effort fetch: any failure simply yields no result.
            return

        name = re.search("<h2.*?class=\"text-large caption\">(?P<name>[^<]+)</h2>", data)
        if name is None:
            name = re.search("<a.*?property=\"v:name\".*?>(?P<name>[^<]+)</a>", data)
        if name is None:
            # BUGFIX: neither markup variant matched; previously this fell
            # through to name.group() and raised AttributeError on None.
            return
        name = name.group('name').decode('string_escape')

        year = re.search("<span.*?id=filmYear.*?>\((?P<year>[^\)]+)\).*?</span>", data)
        if year is None:
            # BUGFIX: guard the year lookup the same way.
            return
        year = year.group('year')

        return self.search(name, year)
|
jlegendary/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/html5lib/html5lib/filters/optionaltags.py | 1727 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
    """Token-stream filter that drops start/end tags the HTML spec allows
    to be omitted (e.g. implied <head>, </li>, </p>)."""

    def slider(self):
        """Yield ``(previous, current, next)`` triples over the source tokens.

        The first triple has ``previous`` of None and the last has ``next``
        of None, so every token is seen exactly once as ``current``.
        """
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        yield previous2, previous1, None

    def __iter__(self):
        """Yield tokens, skipping omissible start and end tags."""
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # A start tag with attributes can never be omitted.
                if (token["data"] or
                    not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token

    def is_optional_start(self, tagname, previous, next):
        """Return True if this start tag may be omitted per the HTML spec."""
        type = next and next["type"] or None
        # BUGFIX: was ``tagname in 'html'``, a substring test that would
        # also match tag names like 'h', 'ht' or 'tm'; equality is intended.
        if tagname == 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceeded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceeded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                        previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False

    def is_optional_end(self, tagname, next):
        """Return True if this end tag may be omitted per the HTML spec."""
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li element's end tag may be omitted if the li element is
            # immediately followed by another li element or if there is
            # no more content in the parent element.
            # An optgroup element's end tag may be omitted if the optgroup
            # element is immediately followed by another optgroup element,
            # or if there is no more content in the parent element.
            # A tr element's end tag may be omitted if the tr element is
            # immediately followed by another tr element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by an address, article, aside,
            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
            # nav, ol, p, pre, section, table, or ul, element, or if
            # there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an <code>optgroup</code>
            # element, or if there is no more content in the parent
            # element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt element's end tag may be omitted if the rt element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            # An rp element's end tag may be omitted if the rp element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td element's end tag may be omitted if the td element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            # A th element's end tag may be omitted if the th element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
|
madan96/sympy | refs/heads/master | sympy/physics/pring.py | 94 | from __future__ import print_function, division
from sympy import sqrt, exp, S, pi, I
from sympy.physics.quantum.constants import hbar
def wavefunction(n, x):
    """
    Return the normalized wavefunction for a particle on a ring.

    Here ``n`` is the quantum number and ``x`` the angular coordinate;
    ``n`` may be negative as well as positive, the sign describing the
    direction of motion of the particle.

    Examples
    ========

    >>> from sympy.physics.pring import wavefunction, energy
    >>> from sympy import Symbol, integrate, pi
    >>> x=Symbol("x")
    >>> wavefunction(1, x)
    sqrt(2)*exp(I*x)/(2*sqrt(pi))
    >>> wavefunction(2, x)
    sqrt(2)*exp(2*I*x)/(2*sqrt(pi))
    >>> wavefunction(3, x)
    sqrt(2)*exp(3*I*x)/(2*sqrt(pi))

    The normalization of the wavefunction is:

    >>> integrate(wavefunction(2, x)*wavefunction(-2, x), (x, 0, 2*pi))
    1
    >>> integrate(wavefunction(4, x)*wavefunction(-4, x), (x, 0, 2*pi))
    1

    References
    ==========

    .. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum
           Mechanics (4th ed.).  Pages 71-73.

    """
    # sympify arguments so symbolic and numeric inputs behave alike
    n, x = S(n), S(x)
    phase = exp(n * I * x)
    return phase / sqrt(2 * pi)
def energy(n, m, r):
    """
    Return the energy of the particle-on-a-ring state with quantum number n.

    E = (n**2 * hbar**2) / (2 * m * r**2), where ``m`` is the mass of the
    particle and ``r`` the radius of the ring.

    Examples
    ========

    >>> from sympy.physics.pring import energy
    >>> from sympy import Symbol
    >>> m=Symbol("m")
    >>> r=Symbol("r")
    >>> energy(1, m, r)
    hbar**2/(2*m*r**2)
    >>> energy(2, m, r)
    2*hbar**2/(m*r**2)
    >>> energy(-2, 2.0, 3.0)
    0.111111111111111*hbar**2

    References
    ==========

    .. [1] Atkins, Peter W.; Friedman, Ronald (2005). Molecular Quantum
           Mechanics (4th ed.).  Pages 71-73.

    """
    n, m, r = S(n), S(m), S(r)
    # reject non-integer quantum numbers (guard clause instead of if/else)
    if not n.is_integer:
        raise ValueError("'n' must be integer")
    return (n**2 * hbar**2) / (2 * m * r**2)
|
Mausy5043/domod | refs/heads/v3 | again21d.py | 1 | #!/usr/bin/env python3
# again21.py measures the DS18B20 temperature.
# uses moving averages
# Wiring :
# Sensor pin : R-Pi B+ pin
# =================:==============
# VIN (red) = 01 - 3v3
# Data (yellow) = 07 - GPIO04
# GND (blue) = 09 - GND
import configparser
import glob
import os
import sys
import syslog
import time
import traceback
from libdaemon import Daemon
# constants
DEBUG = False  # set True by the 'foreground' CLI command for verbose tracing
IS_JOURNALD = os.path.isfile('/bin/journalctl')
# numeric part of this script's filename, used as the config section name
MYID = "".join(list(filter(str.isdigit, os.path.realpath(__file__).split('/')[-1])))
# parent directory name doubles as the application name
MYAPP = os.path.realpath(__file__).split('/')[-2]
NODE = os.uname()[1]  # hostname of this machine

# SENSOR CALIBRATION PROCEDURE
# Given the existing gain and offset.
# 1 Determine a linear least-squares fit between the output of this program and
#   data obtained from a reference sensor
# 2 The least-squares fit will yield the gain(calc) and offset(calc)
# 3 Determine gain(new) and offset(new) as shown here:
#     gain(new)   = gain(old) * gain(calc)
#     offset(new) = offset(old) * gain(calc) + offset(calc)
# 4 Replace the existing values for gain(old) and offset(old) with the values
#   found for gain(new) and offset(new)

# gain(old)
DS18B20_gain = 1.0
# offset(old)
DS18B20_offset = 0.0

OWdir = '/sys/bus/w1/devices/'
# NOTE(review): glob(...)[0] raises IndexError at import time when no
# DS18B20 (family 28) device is present — TODO confirm this is intended.
OWdev = glob.glob(OWdir + '28*')[0]
OWfile = OWdev + '/w1_slave'
class MyDaemon(Daemon):
    """Definition of daemon."""

    @staticmethod
    def run():
        """Main loop: sample the sensor and periodically report a moving average.

        Timing parameters come from the config section named by MYID:
        `reporttime` (seconds between reports), `cycles` and
        `samplespercycle` together size the moving-average window.
        Runs forever; re-raises any unexpected exception after logging it.
        """
        iniconf = configparser.ConfigParser()
        inisection = MYID
        home = os.path.expanduser('~')
        s = iniconf.read(home + '/' + MYAPP + '/config.ini')
        syslog_trace("Config file : {0}".format(s), False, DEBUG)
        syslog_trace("Options : {0}".format(iniconf.items(inisection)), False, DEBUG)
        reporttime = iniconf.getint(inisection, "reporttime")
        cycles = iniconf.getint(inisection, "cycles")
        samplespercycle = iniconf.getint(inisection, "samplespercycle")
        flock = iniconf.get(inisection, "lockfile")
        fdata = iniconf.get(inisection, "resultfile")

        samples = samplespercycle * cycles  # total number of samples averaged
        sampletime = reporttime/samplespercycle  # time [s] between samples
        # cycleTime = samples * sampletime  # time [s] per cycle

        data = []  # array for holding sampledata

        while True:
            try:
                starttime = time.time()

                result = do_work()
                syslog_trace("Result : {0}".format(result), False, DEBUG)
                if (result is not None):
                    data.append(float(result))
                    # keep at most `samples` readings (moving window)
                    if (len(data) > samples):
                        data.pop(0)
                    syslog_trace("Data : {0}".format(data), False, DEBUG)

                    # report sample average
                    # (fires once per reporttime, when starttime lands in
                    # the first sample slot of the reporting interval)
                    if (starttime % reporttime < sampletime):
                        averages = format(sum(data[:]) / len(data), '.2f')
                        syslog_trace("Averages : {0}".format(averages), False, DEBUG)
                        do_report(averages, flock, fdata)
                # endif result not None

                # sleep for the remainder of the sample slot, aligned to
                # sampletime boundaries so samples stay evenly spaced
                waittime = sampletime - (time.time() - starttime) - (starttime % sampletime)
                if (waittime > 0):
                    syslog_trace("Waiting : {0}s".format(waittime), False, DEBUG)
                    syslog_trace("................................", False, DEBUG)
                    time.sleep(waittime)
            except Exception:
                syslog_trace("Unexpected error in run()", syslog.LOG_CRIT, DEBUG)
                syslog_trace(traceback.format_exc(), syslog.LOG_CRIT, DEBUG)
                raise
def read_temp_raw():
    """Return the raw lines of the 1-wire slave file.

    Returns the string "NOPE" when the sensor file is missing; callers
    detect this because it cannot pass the 'YES' CRC check.
    """
    if not os.path.isfile(OWfile):
        syslog_trace("1-wire sensor not available", syslog.LOG_ERR, DEBUG)
        return "NOPE"
    with open(OWfile, 'r') as sensor_file:
        return sensor_file.readlines()
def do_work():
    """Read the DS18B20, apply calibration and return degC, or None.

    Returns None when the CRC check fails, no reading is present, or the
    value is implausibly high (treated as a sensor glitch).
    """
    raw_temp = None
    temperature = None

    sensor_lines = read_temp_raw()
    # A valid reading ends the first line with a 'YES' CRC marker.
    if sensor_lines[0].strip()[-3:] == 'YES':
        marker = sensor_lines[1].find('t=')
        if marker != -1:
            # value after 't=' is in millidegrees Celsius
            raw_temp = float(sensor_lines[1][marker + 2:]) / 1000.0

    # correct the temperature reading
    if raw_temp is not None:
        temperature = raw_temp * DS18B20_gain + DS18B20_offset
        syslog_trace(" T0 = {0:0.1f}*C T = {1:0.1f}degC".format(raw_temp, temperature), False, DEBUG)

    # validate the temperature
    if (temperature is not None) and (temperature > 45.0):
        # can't believe my sensors. Probably a glitch. Log this and return with no result
        syslog_trace("Tambient (HIGH): {0}".format(temperature), syslog.LOG_WARNING, DEBUG)
        temperature = None

    return temperature
def do_report(result, flock, fdata):
    """Append one timestamped result line to the data file, under a lock file."""
    # Get the time and date in human-readable form and UN*X-epoch...
    stamp = time.strftime('%Y-%m-%dT%H:%M:%S')
    epoch = int(time.strftime('%s'))
    # round to current minute to ease database JOINs
    epoch -= epoch % 60

    lock(flock)
    with open(fdata, 'a') as out_file:
        out_file.write('{0}, {1}, {2}\n'.format(stamp, epoch, result))
    unlock(flock)
def lock(fname):
    """Create the lock file (touch semantics: create if absent, keep contents)."""
    with open(fname, 'a'):
        pass
def unlock(fname):
    """Remove the lock file; silently do nothing if it is already gone."""
    if not os.path.isfile(fname):
        return
    os.remove(fname)
def syslog_trace(trace, logerr, out2console):
    """Log each non-empty line of *trace* to syslog (at priority *logerr*,
    when truthy) and/or echo it to the console."""
    for line in trace.split('\n'):
        if not line:
            continue
        if logerr:
            syslog.syslog(logerr, line)
        if out2console:
            print(line)
if __name__ == "__main__":
daemon = MyDaemon('/tmp/' + MYAPP + '/' + MYID + '.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'foreground' == sys.argv[1]:
# assist with debugging.
print("Debug-mode started. Use <Ctrl>+C to stop.")
DEBUG = True
syslog_trace("Daemon logging is ON", syslog.LOG_DEBUG, DEBUG)
daemon.run()
else:
print("Unknown command")
sys.exit(2)
sys.exit(0)
else:
print("usage: {0!s} start|stop|restart|foreground".format(sys.argv[0]))
sys.exit(2)
|
andfoy/margffoy-tuay-server | refs/heads/master | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py | 2928 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
# (final/normal pairs of the five Hebrew letters that have a distinct
# word-final form)
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6

# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5

# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01

VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
    """Helper prober that decides between Logical and Visual Hebrew.

    It never identifies a charset by itself (confidence is always 0); it
    accumulates final-letter evidence and combines it with the scores of
    the two windows-1255 model probers set via set_model_probers().
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self._mLogicalProber = None
        self._mVisualProber = None
        self.reset()

    def reset(self):
        """Reset accumulated final-letter scores and the 2-char lookbehind."""
        self._mFinalCharLogicalScore = 0
        self._mFinalCharVisualScore = 0
        # The two last characters seen in the previous buffer,
        # mPrev and mBeforePrev are initialized to space in order to simulate
        # a word delimiter at the beginning of the data
        self._mPrev = ' '
        self._mBeforePrev = ' '
        # These probers are owned by the group prober.

    def set_model_probers(self, logicalProber, visualProber):
        """Attach the logical and visual model probers (owned by the group)."""
        self._mLogicalProber = logicalProber
        self._mVisualProber = visualProber

    def is_final(self, c):
        """True if *c* is one of the five word-final Hebrew letter forms."""
        return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
                               FINAL_TSADI]

    def is_non_final(self, c):
        """True if *c* is a non-final form that should not end a word."""
        # The normal Tsadi is not a good Non-Final letter due to words like
        # 'lechotet' (to chat) containing an apostrophe after the tsadi. This
        # apostrophe is converted to a space in FilterWithoutEnglishLetters
        # causing the Non-Final tsadi to appear at an end of a word even
        # though this is not the case in the original text.
        # The letters Pe and Kaf rarely display a related behavior of not being
        # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
        # for example legally end with a Non-Final Pe or Kaf. However, the
        # benefit of these letters as Non-Final letters outweighs the damage
        # since these words are quite rare.
        return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]

    def feed(self, aBuf):
        """Accumulate logical/visual final-letter evidence from *aBuf*."""
        # Final letter analysis for logical-visual decision.
        # Look for evidence that the received buffer is either logical Hebrew
        # or visual Hebrew.
        # The following cases are checked:
        # 1) A word longer than 1 letter, ending with a final letter. This is
        #    an indication that the text is laid out "naturally" since the
        #    final letter really appears at the end. +1 for logical score.
        # 2) A word longer than 1 letter, ending with a Non-Final letter. In
        #    normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
        #    should not end with the Non-Final form of that letter. Exceptions
        #    to this rule are mentioned above in isNonFinal(). This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score
        # 3) A word longer than 1 letter, starting with a final letter. Final
        #    letters should not appear at the beginning of a word. This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score.
        #
        # The visual score and logical score are accumulated throughout the
        # text and are finally checked against each other in GetCharSetName().
        # No checking for final letters in the middle of words is done since
        # that case is not an indication for either Logical or Visual text.
        #
        # We automatically filter out all 7-bit characters (replace them with
        # spaces) so the word boundary detection works properly. [MAP]

        if self.get_state() == eNotMe:
            # Both model probers say it's not them. No reason to continue.
            return eNotMe

        aBuf = self.filter_high_bit_only(aBuf)

        for cur in aBuf:
            if cur == ' ':
                # We stand on a space - a word just ended
                if self._mBeforePrev != ' ':
                    # next-to-last char was not a space so self._mPrev is not a
                    # 1 letter word
                    if self.is_final(self._mPrev):
                        # case (1) [-2:not space][-1:final letter][cur:space]
                        self._mFinalCharLogicalScore += 1
                    elif self.is_non_final(self._mPrev):
                        # case (2) [-2:not space][-1:Non-Final letter][
                        #  cur:space]
                        self._mFinalCharVisualScore += 1
            else:
                # Not standing on a space
                if ((self._mBeforePrev == ' ') and
                        (self.is_final(self._mPrev)) and (cur != ' ')):
                    # case (3) [-2:space][-1:final letter][cur:not space]
                    self._mFinalCharVisualScore += 1
            # shift the 2-character lookbehind window; order matters here
            self._mBeforePrev = self._mPrev
            self._mPrev = cur

        # Forever detecting, till the end or until both model probers return
        # eNotMe (handled above)
        return eDetecting

    def get_charset_name(self):
        """Return the winning charset name (Logical vs Visual Hebrew)."""
        # Make the decision: is it Logical or Visual?
        # If the final letter score distance is dominant enough, rely on it.
        finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
        if finalsub >= MIN_FINAL_CHAR_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
            return VISUAL_HEBREW_NAME

        # It's not dominant enough, try to rely on the model scores instead.
        modelsub = (self._mLogicalProber.get_confidence()
                    - self._mVisualProber.get_confidence())
        if modelsub > MIN_MODEL_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if modelsub < -MIN_MODEL_DISTANCE:
            return VISUAL_HEBREW_NAME

        # Still no good, back to final letter distance, maybe it'll save the
        # day.
        if finalsub < 0.0:
            return VISUAL_HEBREW_NAME

        # (finalsub > 0 - Logical) or (don't know what to do) default to
        # Logical.
        return LOGICAL_HEBREW_NAME

    def get_state(self):
        """eDetecting while either model prober is still active, else eNotMe."""
        # Remain active as long as any of the model probers are active.
        if (self._mLogicalProber.get_state() == eNotMe) and \
           (self._mVisualProber.get_state() == eNotMe):
            return eNotMe
        return eDetecting
|
naojsoft/ginga | refs/heads/master | ginga/web/jupyterw/ImageViewJpw.py | 3 | #
# ImageViewJpw.py -- Module for a Ginga FITS viewer in a Jupyter web notebook.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
This example illustrates using a Ginga as the driver of a Jupyter web widget.
REQUIREMENTS:
To use this code you will need the "ipywidgets" and "ipyevents" python
modules installed. These are easily installed via:
$ pip install ipyevents
$ jupyter nbextension enable --py --sys-prefix ipyevents
or via conda:
$ conda install -c conda-forge ipyevents
Basic usage in a Jupyter notebook:
import ipywidgets as widgets
# create a Jupyter image that will be our display surface
# format can be 'jpeg' or 'png'; specify width and height to set viewer size
jp_img = widgets.Image(format='jpeg', width=500, height=500)
# Boilerplate to create a Ginga viewer connected to this widget
# this could be simplified by creating a class that created viewers
# as a factory.
from ginga.misc.log import get_logger
logger = get_logger("v1", log_stderr=True, level=40)
from ginga.web.jupyterw.ImageViewJpw import EnhancedCanvasView
v1 = EnhancedCanvasView(logger=logger)
v1.set_widget(jp_img)
bd = v1.get_bindings()
bd.enable_all(True)
# You can now build a GUI with the image widget and other Jupyter
# widgets. Here we just show the image widget.
v1.embed()
"""
from ipyevents import Event as EventListener
from ginga import ImageView, AstroImage
from ginga import Mixins, Bindings
from ginga.canvas import render
from ginga.util.toolbox import ModeIndicator
from ginga.util import loader
from ginga.web.jupyterw import JpHelp
class ImageViewJpwError(ImageView.ImageViewError):
    """Exception raised for errors specific to the Jupyter widget viewer."""
    pass
class ImageViewJpw(ImageView.ImageViewBase):
    """Ginga image viewer that renders into a Jupyter ``Image`` widget."""

    def __init__(self, logger=None, rgbmap=None, settings=None):
        ImageView.ImageViewBase.__init__(self, logger=logger,
                                         rgbmap=rgbmap,
                                         settings=settings)

        self.t_.set_defaults(renderer='cairo')

        self.rgb_order = 'RGBA'
        self.jp_img = None       # the Jupyter Image widget (set via set_widget)
        self.jp_evt = None       # event listener, attached by subclasses
        self._defer_task = None  # timer driving delayed redraws
        self.msgtask = None      # timer clearing onscreen messages
        self.renderer = None

        # Pick a renderer that can work with us
        renderers = ['cairo', 'agg', 'pil', 'opencv']
        preferred = self.t_['renderer']
        if preferred in renderers:
            # try the configured renderer first, then the others in order
            renderers.remove(preferred)
        self.possible_renderers = [preferred] + renderers
        self.choose_best_renderer()

    def set_widget(self, jp_img):
        """Call this method with the Jupyter image widget (image_w)
        that will be used.
        """
        self.jp_img = jp_img
        # TODO: need configure (resize) event callback
        # see reschedule_redraw() method

        self._defer_task = JpHelp.Timer()
        self._defer_task.add_callback('expired',
                                      lambda timer: self.delayed_redraw())
        self.msgtask = JpHelp.Timer()
        self.msgtask.add_callback('expired',
                                  lambda timer: self.onscreen_message(None))

        # for some reason these are stored as strings!
        wd, ht = int(jp_img.width), int(jp_img.height)
        self.configure_window(wd, ht)

    def get_widget(self):
        """Return the Jupyter image widget set via set_widget()."""
        return self.jp_img

    def choose_renderer(self, name):
        """Instantiate the named render class and size it to the widget."""
        klass = render.get_render_class(name)
        self.renderer = klass(self)

        if self.jp_img is not None:
            wd, ht = int(self.jp_img.width), int(self.jp_img.height)
            self.configure_window(wd, ht)

    def choose_best_renderer(self):
        """Try renderers in preference order; keep the first that works.

        Raises ImageViewJpwError when none can be instantiated.
        """
        for name in self.possible_renderers:
            try:
                self.choose_renderer(name)
                self.logger.info("best renderer available is '{}'".format(name))
                return
            except Exception as e:
                # this renderer's backend is unavailable; try the next one
                continue

        raise ImageViewJpwError("No valid renderers available: {}".format(str(self.possible_renderers)))

    def update_widget(self):
        """Push the rendered surface into the widget in its own format."""
        fmt = self.jp_img.format
        web_img = self.renderer.get_surface_as_rgb_format_bytes(
            format=fmt)
        # this updates the model, and then the Jupyter image(s)
        self.jp_img.value = web_img

    def reschedule_redraw(self, time_sec):
        """Restart the deferred-redraw timer to fire in *time_sec* seconds."""
        self._defer_task.stop()
        self._defer_task.start(time_sec)

    def configure_window(self, width, height):
        """Resize the viewer's logical window."""
        self.configure(width, height)

    def _resize_cb(self, event):
        # resize-event hook: mirror the widget's new size into the viewer
        self.configure_window(event.width, event.height)

    def set_cursor(self, cursor):
        # TODO
        pass

    def onscreen_message(self, text, delay=None, redraw=True):
        """Show *text* over the image; clear it after *delay* seconds if given.

        Pass text=None to clear immediately. No-op before set_widget().
        """
        if self.jp_img is None:
            return
        self.msgtask.stop()
        self.set_onscreen_message(text, redraw=redraw)
        if delay is not None:
            self.msgtask.start(delay)
class ImageViewEvent(ImageViewJpw):
    """ImageViewJpw subclass that translates browser DOM events (delivered
    through an EventListener widget) into Ginga UI callbacks.
    """

    def __init__(self, logger=None, rgbmap=None, settings=None):
        ImageViewJpw.__init__(self, logger=logger, rgbmap=rgbmap,
                              settings=settings)

        self._button = 0   # bitmask of the currently pressed mouse button

        # maps EventListener events to callback handlers
        self._evt_dispatch = {
            'mousedown': self.button_press_event,
            'mouseup': self.button_release_event,
            'mousemove': self.motion_notify_event,
            'wheel': self.scroll_event,
            'mouseenter': self.enter_notify_event,
            'mouseleave': self.leave_notify_event,
            'keydown': self.key_press_event,
            'keyup': self.key_release_event,
        }

        # mapping from EventListener key events to ginga key events
        self._keytbl = {
            'shiftleft': 'shift_l',
            'shiftright': 'shift_r',
            'controlleft': 'control_l',
            'controlright': 'control_r',
            'altleft': 'alt_l',
            'altright': 'alt_r',
            'osleft': 'super_l',
            'osright': 'super_r',
            'contextmenu': 'menu_r',
            'backslash': 'backslash',
            'space': 'space',
            'escape': 'escape',
            'enter': 'return',
            'tab': 'tab',
            'arrowright': 'right',
            'arrowleft': 'left',
            'arrowup': 'up',
            'arrowdown': 'down',
            'pageup': 'page_up',
            'pagedown': 'page_down',
            'f1': 'f1',
            'f2': 'f2',
            'f3': 'f3',
            'f4': 'f4',
            'f5': 'f5',
            'f6': 'f6',
            'f7': 'f7',
            'f8': 'f8',
            'f9': 'f9',
            'f10': 'f10',
            'f11': 'f11',
            'f12': 'f12',
        }
        # fallback table keyed on the event's `key` field (punctuation
        # whose `code` name is not useful)
        self._keytbl2 = {
            '`': 'backquote',
            '"': 'doublequote',
            "'": 'singlequote',
        }

        # Define cursors for pick and pan
        #hand = openHandCursor()
        hand = 'fleur'
        self.define_cursor('pan', hand)
        cross = 'cross'
        self.define_cursor('pick', cross)

        # enable the UI callbacks this class can generate
        for name in ('motion', 'button-press', 'button-release',
                     'key-press', 'key-release', 'drag-drop',
                     'scroll', 'map', 'focus', 'enter', 'leave',
                     'pinch', 'rotate', 'pan', 'swipe', 'tap'):
            self.enable_callback(name)

    def set_widget(self, jp_imgw):
        """Call this method with the Jupyter image widget (image_w)
        that will be used.
        """
        super(ImageViewEvent, self).set_widget(jp_imgw)

        # NOTE(review): EventListener is not among this module's visible
        # imports (presumably ipyevents) -- confirm where it comes from.
        self.jp_evt = EventListener(source=jp_imgw)
        self.jp_evt.watched_events = [
            'keydown', 'keyup', 'mouseenter', 'mouseleave',
            'mousedown', 'mouseup', 'mousemove', 'wheel',
            'contextmenu'
        ]
        self.jp_evt.prevent_default_action = True

        self.jp_evt.on_dom_event(self._handle_event)
        self.logger.info("installed event handlers")

        return self.make_callback('map')

    def _handle_event(self, event):
        # dispatch a DOM event dict to its handler, if any
        # TODO: need focus events and maybe a map event
        # TODO: Set up widget as a drag and drop destination
        evt_kind = event['type']
        handler = self._evt_dispatch.get(evt_kind, None)
        if handler is not None:
            return handler(event)
        return False

    def transkey(self, keycode, keyname=None):
        """Translate a DOM key `code`/`key` pair to a Ginga key name."""
        keycode = str(keycode).lower()
        if keyname is None:
            keyname = keycode
        self.logger.debug("key code in jupyter '%s'" % (keycode))
        res = self._keytbl.get(keycode, None)
        if res is None:
            # fall back to the `key` field, else pass the name through
            res = self._keytbl2.get(keyname, keyname)
        return res

    def get_key_table(self):
        """Return the DOM-code -> Ginga key-name mapping table."""
        return self._keytbl

    def focus_event(self, event, has_focus):
        return self.make_callback('focus', has_focus)

    def enter_notify_event(self, event):
        enter_focus = self.t_.get('enter_focus', False)
        if enter_focus:
            # TODO: set focus on canvas
            pass
        return self.make_callback('enter')

    def leave_notify_event(self, event):
        self.logger.debug("leaving widget...")
        return self.make_callback('leave')

    def key_press_event(self, event):
        keyname = self.transkey(event['code'], keyname=event['key'])
        self.logger.debug("key press event, key=%s" % (keyname))
        return self.make_ui_callback_viewer(self, 'key-press', keyname)

    def key_release_event(self, event):
        keyname = self.transkey(event['code'], keyname=event['key'])
        self.logger.debug("key release event, key=%s" % (keyname))
        return self.make_ui_callback_viewer(self, 'key-release', keyname)

    def button_press_event(self, event):
        x, y = event['dataX'], event['dataY']
        self.last_win_x, self.last_win_y = x, y

        # encode the DOM button number as a one-hot bitmask
        button = 0
        button |= 0x1 << event['button']
        self._button = button
        self.logger.debug("button event at %dx%d, button=%x" % (x, y, button))

        data_x, data_y = self.check_cursor_location()
        return self.make_ui_callback_viewer(self, 'button-press', button, data_x, data_y)

    def button_release_event(self, event):
        x, y = event['dataX'], event['dataY']
        self.last_win_x, self.last_win_y = x, y

        button = 0
        button |= 0x1 << event['button']
        self._button = 0
        self.logger.debug("button release at %dx%d button=%x" % (x, y, button))

        data_x, data_y = self.check_cursor_location()
        return self.make_ui_callback_viewer(self, 'button-release', button, data_x, data_y)

    def motion_notify_event(self, event):
        button = self._button
        x, y = event['dataX'], event['dataY']
        self.last_win_x, self.last_win_y = x, y
        self.logger.debug("motion event at %dx%d, button=%x" % (x, y, button))

        data_x, data_y = self.check_cursor_location()
        return self.make_ui_callback_viewer(self, 'motion', button, data_x, data_y)

    def scroll_event(self, event):
        x, y = event['dataX'], event['dataY']
        self.last_win_x, self.last_win_y = x, y

        dx, dy = event['deltaX'], event['deltaY']

        if (dx != 0 or dy != 0):
            # <= This browser gives us deltas for x and y
            # Synthesize this as a pan gesture event
            self.make_ui_callback_viewer(self, 'pan', 'start', 0, 0)
            self.make_ui_callback_viewer(self, 'pan', 'move', -dx, -dy)
            return self.make_ui_callback_viewer(self, 'pan', 'stop', 0, 0)

        # <= This code path should not be followed under normal
        # circumstances.
        # we leave it here in case we want to make the scroll
        # callback configurable in the future

        # TODO: calculate actual angle of direction
        if dy < 0:
            direction = 0.0   # up
        elif dy > 0:
            direction = 180.0   # down
        else:
            return False

        # 15 deg is standard 1-click turn for a wheel mouse
        num_deg = 15.0
        self.logger.debug("scroll deg=%f direction=%f" % (
            num_deg, direction))

        data_x, data_y = self.check_cursor_location()
        return self.make_ui_callback_viewer(self, 'scroll', direction, num_deg,
                                            data_x, data_y)
class ImageViewZoom(Mixins.UIMixin, ImageViewEvent):
    """Viewer with user-interface bindings attached.

    The classes used to create the binding map and the bindings can be
    overridden via the set_bindmapClass()/set_bindingsClass() hooks.
    """

    # class variables for binding map and bindings can be set
    bindmapClass = Bindings.BindingMapper
    bindingsClass = Bindings.ImageViewBindings

    @classmethod
    def set_bindingsClass(cls, klass):
        """Override the bindings class used by new instances."""
        cls.bindingsClass = klass

    @classmethod
    def set_bindmapClass(cls, klass):
        """Override the binding-map class used by new instances."""
        cls.bindmapClass = klass

    def __init__(self, logger=None, rgbmap=None, settings=None,
                 bindmap=None, bindings=None):
        ImageViewEvent.__init__(self, logger=logger, rgbmap=rgbmap,
                                settings=settings)
        Mixins.UIMixin.__init__(self)

        self.ui_set_active(True, viewer=self)

        if bindmap is None:
            bindmap = ImageViewZoom.bindmapClass(self.logger)
        self.bindmap = bindmap
        # route UI events received by this viewer through the bindmap
        bindmap.register_for_events(self)

        if bindings is None:
            bindings = ImageViewZoom.bindingsClass(self.logger)
        self.set_bindings(bindings)

    def get_bindmap(self):
        """Return the current binding map."""
        return self.bindmap

    def get_bindings(self):
        """Return the current bindings object."""
        return self.bindings

    def set_bindings(self, bindings):
        """Install a new bindings object on this viewer."""
        self.bindings = bindings
        bindings.set_bindings(self)
class CanvasView(ImageViewZoom):
    """ImageViewZoom with an onscreen mode indicator; the standard viewer
    class to instantiate for this backend.
    """

    def __init__(self, logger=None, settings=None, rgbmap=None,
                 bindmap=None, bindings=None):
        ImageViewZoom.__init__(self, logger=logger, settings=settings,
                               rgbmap=rgbmap,
                               bindmap=bindmap, bindings=bindings)

        # Needed for UIMixin to propagate events correctly
        self.objects = [self.private_canvas]
        self._mi = ModeIndicator(self)

    def set_canvas(self, canvas, private_canvas=None):
        """Install a new canvas; keeps the UIMixin object list in sync."""
        super(CanvasView, self).set_canvas(canvas,
                                           private_canvas=private_canvas)

        self.objects[0] = self.private_canvas
class EnhancedCanvasView(CanvasView):
    """
    This just adds some convenience methods to the viewer for loading images,
    grabbing screenshots, etc. You can subclass to add new methods.
    """

    def embed(self):
        """
        Embed a viewer into a Jupyter notebook.

        Returns the underlying Jupyter image widget, which the notebook
        renders inline.
        """
        return self.jp_img

    def open(self, new=1):
        """
        Open this viewer in a new browser window or tab.

        Raises
        ------
        NotImplementedError
            Always, for now: the operation is not yet supported.
        """
        # TBD
        # NotImplementedError is the conventional signal for an
        # unimplemented operation; it is still a subclass of Exception,
        # so callers that caught the old generic Exception keep working.
        raise NotImplementedError("Not yet implemented!")

    def show(self, fmt=None):
        """
        Capture the window of a viewer.

        Parameters
        ----------
        fmt : str or None
            Image format to use; defaults to the format of the
            underlying Jupyter image widget.

        Returns
        -------
        IPython.display.Image
            A displayable snapshot of the current view.
        """
        # force any delayed redraws
        # TODO: this really needs to be addressed in get_rgb_image_as_bytes()
        # of the various superclasses, as it affects other backends as well
        self.redraw_now()

        from IPython.display import Image

        if fmt is None:
            # what format are we using for the Jupyter image--use that
            fmt = self.jp_img.format

        return Image(data=bytes(self.get_rgb_image_as_bytes(format=fmt)),
                     format=fmt, embed=True)

    def load(self, filepath):
        """
        Load a file into the viewer.
        """
        image = loader.load_file(filepath, logger=self.logger)
        self.set_image(image)

    # backward-compatible alias
    load_fits = load

    def load_hdu(self, hdu):
        """
        Load an astropy HDU into the viewer.
        """
        image = AstroImage.AstroImage(logger=self.logger)
        image.load_hdu(hdu)
        self.set_image(image)

    def load_data(self, data_np):
        """
        Load raw numpy data into the viewer.
        """
        image = AstroImage.AstroImage(logger=self.logger)
        image.set_data(data_np)
        self.set_image(image)

    def add_canvas(self, tag=None):
        """Create a drawing canvas, attach it to this viewer, return it."""
        # add a canvas to the view
        my_canvas = self.get_canvas()
        DrawingCanvas = my_canvas.get_draw_class('drawingcanvas')
        canvas = DrawingCanvas()
        # enable drawing on the canvas
        canvas.enable_draw(True)
        canvas.enable_edit(True)
        canvas.set_drawtype(None)
        canvas.ui_set_active(True, viewer=self)
        canvas.set_surface(self)
        canvas.register_for_cursor_drawing(self)
        # add the canvas to the view.
        my_canvas.add(canvas, tag=tag)
        return canvas
|
ihacklog/osdlyrics | refs/heads/master | tools/create-lyricsource.py | 1 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Tiger Soldier <tigersoldi@gmail.com>
#
# This file is part of OSD Lyrics.
#
# OSD Lyrics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OSD Lyrics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OSD Lyrics. If not, see <http://www.gnu.org/licenses/>.
#/
import re
import readline
import os
import os.path
import string
ROOTMAKEFILEAM = r"""SUBDIRS = src
"""
MAKEFILEAM = r"""${name}_PYTHON = ${name}.py
${name}dir = $$(pkglibdir)/lyricsources/${name}
servicedir = $$(datadir)/dbus-1/services
service_in_files = org.osdlyrics.LyricSourcePlugin.${name}.service.in
service_DATA = $$(service_in_files:.service.in=.service)
EXTRA_DIST = \
$$(service_in_files) \
$$(NULL)
$$(service_DATA): $$(service_in_files)
@sed -e "s|\@pkglibdir\@|$$(pkglibdir)|" -e "s|\@PYTHON\@|$$(PYTHON)|" $$< > $$@
CLEANFILES = \
org.osdlyrics.LyricSourcePlugin.${name}.service \
$$(NULL)
"""
SERVICE = r"""[D-BUS Service]
Name=org.osdlyrics.LyricSourcePlugin.${name}
Exec=@PYTHON@ @pkglibdir@/lyricsources/${name}/${name}.py
"""
PYTHON = r"""# -*- coding: utf-8 -*-
class ${capsname}Source(BaseLyricSourcePlugin):
def __init__(self):
BaseLyricSourcePlugin.__init__(self, id='${name}', name='${name}')
def do_search(self, metadata):
# return list of SearchResult
# you can make use of utils.http_download
#
# example:
status, content = http_download(url='http://foo.bar/foobar'
params={param1='foo', param2='bar'},
proxy=get_proxy_settings(config=self.config_proxy))
if status < 200 or status >= 400:
raise httplib.HTTPException(status, '')
# now do something with content
return [SearchResult(title='title',
artist='artist',
album='album',
sourceid=self.id,
downloadinfo='http://foo.bar/download?id=1')]
def do_download(self, downloadinfo):
# return a string
# downloadinfo is what you set in SearchResult
if not isinstance(downloadinfo, str) and \
not isinstance(downloadinfo, unicode):
raise TypeError('Expect the downloadinfo as a string of url, but got type ',
type(downloadinfo))
status, content = http_download(url=downloadinfo,
proxy=get_proxy_settings(self.config_proxy))
if status < 200 or status >= 400:
raise httplib.HTTPException(status, '')
return content
if __name__ == '__main__':
${name} = ${capsname}Source()
${name}._app.run()
"""
def input_name():
    """Prompt until the user enters a valid lyric-source name.

    A valid name starts with a lower-case letter and contains only
    lower-case letters and digits.  Input is lower-cased and stripped
    before validation.
    """
    prompt = 'Input the lyric source name with only lower-case alphabets and numbers:\n'
    while True:
        candidate = raw_input(prompt).strip().lower()
        if re.match(r'[a-z][a-z0-9]*$', candidate):
            return candidate
        # reuse the loop with a corrective prompt
        prompt = 'Invalid name. Name must contain only lower-case alphabets and numbers.\nName:'
def input_boolean(prompt, default_value):
    """Ask a yes/no question on the console and return a bool.

    An explicit 'y' or 'n' answer (case-insensitive, surrounding
    whitespace ignored) wins; anything else falls back to
    default_value.  The default is shown capitalized in the prompt.
    """
    # Fix: the two prompt variants were inconsistent ('[Y/n]?' vs '[y/N]'
    # without the question mark); also avoid the `== True` anti-idiom.
    prompt += ' [Y/n]?' if default_value else ' [y/N]?'
    value = raw_input(prompt).strip().lower()
    if value == 'y':
        return True
    if value == 'n':
        return False
    return bool(default_value)
def create_file(template, path, name, params):
    """Render a string.Template and write it to ``path/name``.

    Parameters
    ----------
    template : str
        Template source text (``${key}`` placeholders).
    path : str
        Destination directory (must already exist).
    name : str
        File name to create inside `path`.
    params : dict
        Substitution mapping; KeyError is raised for missing keys.
    """
    content = string.Template(template).substitute(params)
    # `with` guarantees the file is closed even if write() raises
    with open(os.path.join(path, name), 'w') as f:
        f.write(content)
def main():
    """Interactively collect options and generate the plugin skeleton.

    Asks for the source name, whether to emit Makefile.am files and
    whether to place sources in a src/ subdirectory, then writes the
    plugin .py, the D-Bus .service.in and (optionally) the automake
    files under a directory named after the source.
    """
    name = input_name()
    have_am = input_boolean('Generate Makefile.am', True)
    have_subdir = input_boolean('Create source files in src subdirectory', False)
    rootpath = name
    # sources go either directly into <name>/ or into <name>/src/
    srcpath = name if not have_subdir else name + '/src'
    if not os.path.isdir(srcpath):
        os.makedirs(srcpath)
    params = {
        'name': name,
        'capsname': name.capitalize()
    }
    create_file(PYTHON, srcpath, name + '.py', params)
    create_file(SERVICE, srcpath,
                'org.osdlyrics.LyricSourcePlugin.' + name + '.service.in',
                params)
    if have_am:
        create_file(MAKEFILEAM, srcpath, 'Makefile.am', params)
        if have_subdir:
            create_file(ROOTMAKEFILEAM, rootpath, 'Makefile.am', params)
    # Fix: use the function-call form so the script parses under both
    # Python 2 and Python 3 (the rest of the script is Python 2 only).
    print('Done')

if __name__ == '__main__':
    main()
|
philwilliammee/pyPincher2 | refs/heads/master | tool_path_editor.py | 1 | #!/usr/bin/env python2.7
'''
do testing here
'''
from numpy import *
import numpy as np
import pylab as p
import matplotlib.axes as a
import mpl_toolkits.mplot3d.axes3d as a3d
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
#open a obj file and read faces and vertices
vertices = []
faces = []
material = None
read_f = ("python.obj")
# NOTE(review): the handle from open() is never closed; this is a
# Python 2 one-shot script so it is released at interpreter exit.
for i, line in enumerate(open(read_f, "r")):
    if line.startswith('#'): continue  # OBJ comment line
    values = line.split()
    if not values: continue  # blank line
    if values[0] == 'v':
        # vertex record "v x y z"; Python 2 map() returns a list here
        v = map(float, values[1:4])
        vertices.append(v)
    elif values[0] == 'f':
        # face record "f v1[/vt1[/vn1]] ..." -- keep only vertex indices
        face = []
        texcoords = []
        norms = []
        for v in values[1:]:
            w = v.split('/')
            face.append(int(w[0]))
        faces.append(face)
print vertices
'''
all_v = []
for v in faces:
    for w in v:
        all_v.append(w)
print all_v
v_set = (set(all_v))
print v_set
per = []
for f in v_set:
    per.append(vertices[f-1])
print per
'''
# split the vertex list into parallel coordinate lists
xs,ys,zs=[],[],[]
xs1,ys1,zs1=[],[],[]
for x1,y1,z1 in (vertices):
    xs.append(x1)
    ys.append(y1)
    zs.append(z1)
# resolve each face's 1-based indices into actual vertex triples
quad = []
for f in (faces):
    quad.append([vertices[v - 1] for v in f])
print quad
def pointdistance(p1, p2):
    """Return the Euclidean distance between two 3-D points.

    Only the first three components of each point are used, so inputs
    may carry extra trailing data.
    """
    x1, y1, z1 = p1[:3]
    x2, y2, z2 = p2[:3]
    dx, dy, dz = x2 - x1, y2 - y1, z2 - z1
    # Fix: `math` is not among this script's visible imports (only
    # numpy/pylab), so the original math.sqrt call would raise
    # NameError; exponentiation avoids the dependency entirely.
    return (dx * dx + dy * dy + dz * dz) ** 0.5
def save_obj(xs, ys, zs,
             path='C:/Users/Phil Williammee/Desktop/pyjunk/pyThon.obj'):
    """Write the coordinates as Wavefront OBJ vertex ('v') records.

    Parameters
    ----------
    xs, ys, zs : sequences
        Coordinate lists, iterated in lockstep (shortest one wins).
    path : str
        Output file name.  Parameterized instead of the original
        hard-coded absolute path; the old value is kept as the default
        for backward compatibility.
    """
    # context manager guarantees the file is closed even on error
    with open(path, 'w+') as out:
        for xx, yy, zz in zip(xs, ys, zs):
            out.write("v " + str(xx) + " " + str(yy) + " " + str(zz) + "\n")
# accumulated tool-path coordinates and a log `b` of inserted travel moves
x=[]
y=[]
z=[]
b=[]
last_v= [0,0,0]

def filter(i,v):
    # NOTE(review): shadows the builtin filter(); rename when refactoring.
    # Inserts a "travel" move: rise above the previous vertex, move over
    # the new vertex at the safe height, then plunge back down.
    b.append((i,v))
    x.append(vertices[i-1][0])
    y.append(vertices[i-1][2])
    z.append(80)     # 80 appears to be the safe travel height -- confirm
    #go over
    x.append(v[0])
    y.append(v[2])
    z.append(80)
    #go up
    #go down
    x.append(v[0])
    y.append(v[2])
    z.append(65.50)  # 65.50 appears to be the cutting height -- confirm
def search():
    # Debug helper: print the indices from `some_good` that are not in
    # the hard-coded `bad` list (both recorded from earlier runs).
    found=[]
    bad = [0, 360, 366, 372, 1142, 1160, 1179, 1683, 1689, 1695, 1696, 1701, 1707, 1719, 1731]
    some_good = [0, 99, 360, 366, 367, 372, 950, 1142, 1160, 1179, 1491, 1683, 1689, 1690, 1695, 1696, 1701, 1702, 1707, 1709, 1719, 1720, 1721, 1731]
    for g in some_good:
        if g not in bad:
            found.append(g)
    print "good?",found
def draw_vert():
    # Walk the parsed vertices building the global x/y/z tool path: when
    # the jump from the previous vertex exceeds 2.0 units, insert a
    # travel move via filter(); otherwise cut directly at height 65.5.
    global last_v
    skip=[]#1, 361, 373, 1684, 1708, 1749]
    for i, v in enumerate(vertices):
        distance = pointdistance(v, last_v)
        if distance > 2.0 and i not in skip:
            filter(i,v)
        else:
            x.append(v[0])
            y.append(v[2])
            z.append(65.5)
        last_v = v
    # report the indices where travel moves were inserted
    a=[b1[0] for b1 in b]
    print "bad", a
#search()
draw_vert()

# Create main figure
fig=p.figure()
fig.suptitle('Object', fontsize=14, fontweight='bold')
#ax = a3d.Axes3D(fig)

# scale and offset the path into machine coordinates
# (x7 scale, y mirrored; origin shifted to (-200, 100))
x = np.multiply(7,x)
y = np.multiply(-7,y)
x=np.add(-200,x)
y=np.add(100,y)

ax = Axes3D(fig)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.plot3D(x,y,z)
# persist the transformed path, then show the preview window
save_obj(x,y,z)
p.show()
|
hubsaysnuaa/odoo | refs/heads/8.0 | addons/account_asset/report/__init__.py | 445 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
vshtanko/scikit-learn | refs/heads/master | sklearn/cluster/birch.py | 207 | # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
    """This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row that is
    expensive.
    """
    n_samples = X.shape[0]
    X_indices = X.indices
    X_data = X.data
    X_indptr = X.indptr

    for i in xrange(n_samples):
        row = np.zeros(X.shape[1])
        # CSR layout: row i's values live in data[indptr[i]:indptr[i+1]]
        # at the columns given by the matching slice of `indices`
        startptr, endptr = X_indptr[i], X_indptr[i + 1]
        nonzero_indices = X_indices[startptr:endptr]
        row[nonzero_indices] = X_data[startptr:endptr]
        yield row
def _split_node(node, threshold, branching_factor):
    """The node has to be split if there is no place for a new subcluster
    in the node.
    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
    according to the nearest distance between the subclusters to the
    pair of distant subclusters.
    4. The two nodes are set as children to the two subclusters.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_node2 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2

    if node.is_leaf:
        # splice the two new leaves into the doubly-linked leaf list,
        # replacing the node being split
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2

    # pairwise squared distances between the node's subcluster centroids
    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
    n_clusters = dist.shape[0]

    # (row, col) of the two mutually farthest subclusters: these seed
    # the two halves of the split
    farthest_idx = np.unravel_index(
        dist.argmax(), (n_clusters, n_clusters))
    # fancy indexing: the rows of squared distances from each seed
    # subcluster to every subcluster in the node
    node1_dist, node2_dist = dist[[farthest_idx]]

    node1_closer = node1_dist < node2_dist
    # assign every old subcluster to whichever seed is nearer
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode(object):
    """Each node in a CFTree is called a CFNode.

    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.

    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.

    branching_factor : int
        Maximum number of CF subclusters in each node.

    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.

    n_features : int
        The number of features.

    Attributes
    ----------
    subclusters_ : array-like
        list of subclusters for a particular CFNode.

    prev_leaf_ : _CFNode
        prev_leaf. Useful only if is_leaf is True.

    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True.
        Needed for retrieving the final subclusters.

    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of the ``init_centroids_`` .

    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.

    centroids_ : ndarray
        view of ``init_centroids_``.

    squared_norm_ : ndarray
        view of ``init_sq_norm_``.
    """
    def __init__(self, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features

        # The list of subclusters, centroids and squared norms
        # to manipulate throughout.
        self.subclusters_ = []
        # one extra slot so a node can temporarily hold
        # branching_factor + 1 subclusters before being split
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_sq_norm_ = np.zeros((branching_factor + 1))
        self.squared_norm_ = []
        self.prev_leaf_ = None
        self.next_leaf_ = None

    def append_subcluster(self, subcluster):
        """Add a subcluster to this node and refresh the views."""
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_sq_norm_[n_samples] = subcluster.sq_norm_

        # Keep centroids and squared norm as views. In this way
        # if we change init_centroids and init_sq_norm_, it is
        # sufficient,
        self.centroids_ = self.init_centroids_[:n_samples + 1, :]
        self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]

    def update_split_subclusters(self, subcluster,
                                 new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        # the first half replaces the old subcluster in place; the
        # second half is appended at the end
        ind = self.subclusters_.index(subcluster)
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
        self.append_subcluster(new_subcluster2)

    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.

        Returns True when this node has overflowed and must itself be
        split by the caller, False otherwise.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False

        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # reduced squared distance: ||c||^2 - 2 c.x (the ||x||^2 term is
        # constant across candidates, so it can be dropped from argmin)
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]

        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(
                subcluster)

            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = \
                    self.subclusters_[closest_index].centroid_
                self.init_sq_norm_[closest_index] = \
                    self.subclusters_[closest_index].sq_norm_
                return False

            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor)
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2)

                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False

        # good to go!
        else:
            merged = closest_subcluster.merge_subcluster(
                subcluster, self.threshold)
            if merged:
                self.init_centroids_[closest_index] = \
                    closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = \
                    closest_subcluster.sq_norm_
                return False

            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False

            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
ends up at the subcluster of the leaf of the tree has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be lesser than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
Maximum number of CF subclusters in each node. If a new samples enters
such that the number of subclusters exceed the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
    n_clusters : int, instance of sklearn.cluster model or None, default 3
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples. If set to None, this
        final clustering step is not performed and the subclusters are
        returned as they are. If a model is provided, the model is fit
        treating the subclusters as new samples and the initial data is
        mapped to the label of the closest subcluster. If an int is
        provided, the model fit is AgglomerativeClustering with n_clusters
        set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
    def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
                 compute_labels=True, copy=True):
        # Following scikit-learn convention, __init__ only stores the
        # parameters; validation is deferred to fit/_fit.
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.n_clusters = n_clusters
        self.compute_labels = compute_labels
        self.copy = copy
    def fit(self, X, y=None):
        """
        Build a CF Tree for the input data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        self : Birch
            The fitted estimator.
        """
        # flag a full fit, so _fit starts a fresh tree
        self.fit_, self.partial_fit_ = True, False
        return self._fit(X)
    def _fit(self, X):
        # shared implementation behind fit() and partial_fit()
        X = check_array(X, accept_sparse='csr', copy=self.copy)
        threshold = self.threshold
        branching_factor = self.branching_factor

        if branching_factor <= 1:
            raise ValueError("Branching_factor should be greater than one.")
        n_samples, n_features = X.shape

        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        partial_fit = getattr(self, 'partial_fit_')
        has_root = getattr(self, 'root_', None)
        if getattr(self, 'fit_') or (partial_fit and not has_root):
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
                                 n_features=n_features)

            # To enable getting back subclusters.
            self.dummy_leaf_ = _CFNode(threshold, branching_factor,
                                       is_leaf=True, n_features=n_features)
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_

        # Cannot vectorize. Enough to convince to use cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X

        for sample in iter_func(X):
            # insert each sample as a singleton subcluster; when the root
            # overflows, grow the tree one level by splitting the root
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)

            if split:
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor)
                del self.root_
                self.root_ = _CFNode(threshold, branching_factor,
                                     is_leaf=False,
                                     n_features=n_features)
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)

        # gather all leaf centroids into one array for predict/transform
        centroids = np.concatenate([
            leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids

        self._global_clustering(X)
        return self
def _get_leaves(self):
    """Retrieve the leaves of the CF Node.

    Returns
    -------
    leaves: array-like
        List of the leaf nodes.
    """
    # dummy_leaf_ is a sentinel heading a singly linked list that
    # threads through every leaf node of the tree.
    node, leaves = self.dummy_leaf_.next_leaf_, []
    while node is not None:
        leaves.append(node)
        node = node.next_leaf_
    return leaves
def partial_fit(self, X=None, y=None):
    """Online learning. Prevents rebuilding of CFTree from scratch.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features), None
        Input data. If X is not provided, only the global clustering
        step is done.
    """
    self.partial_fit_ = True
    self.fit_ = False
    if X is None:
        # No new data: just redo the final global clustering step.
        self._global_clustering()
        return self
    self._check_fit(X)
    return self._fit(X)
def _check_fit(self, X):
    """Raise unless the estimator was (partially) fitted and X matches
    the training dimensionality."""
    fitted = hasattr(self, 'subcluster_centers_')
    # partial_fit_ is set as soon as partial_fit is called, even before
    # any tree exists, so a first partial_fit chunk passes this check.
    if not fitted and not hasattr(self, 'partial_fit_'):
        raise NotFittedError("Fit training data before predicting")
    if fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
        raise ValueError(
            "Training data and predicted data do "
            "not have same number of features.")
def predict(self, X):
    """Predict data using the ``centroids_`` of subclusters.

    Avoid computation of the row norms of X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Input data.

    Returns
    -------
    labels: ndarray, shape(n_samples)
        Labelled data.
    """
    X = check_array(X, accept_sparse='csr')
    self._check_fit(X)
    # argmin over c of ||x - c||^2 equals argmin of (||c||^2 - 2 x.c):
    # the ||x||^2 term is constant per row and can be dropped, so the
    # row norms of X are never computed.
    dist = -2 * safe_sparse_dot(X, self.subcluster_centers_.T)
    dist += self._subcluster_norms
    return self.subcluster_labels_[np.argmin(dist, axis=1)]
def transform(self, X, y=None):
    """Transform X into subcluster centroids dimension.

    Each dimension represents the distance from the sample point to each
    cluster centroid.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Input data.

    Returns
    -------
    X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
        Transformed data.
    """
    check_is_fitted(self, 'subcluster_centers_')
    # Column j of the result is the distance of each sample to the j-th
    # subcluster centroid.
    centers = self.subcluster_centers_
    return euclidean_distances(X, centers)
def _global_clustering(self, X=None):
    """
    Global clustering for the subclusters obtained after fitting
    """
    # n_clusters may be an int (→ AgglomerativeClustering), an estimator
    # with fit_predict, or None (→ no global step).
    clusterer = self.n_clusters
    centroids = self.subcluster_centers_
    # Sample labels are only computed when requested and when training
    # data is available (partial_fit(None) passes X=None).
    compute_labels = (X is not None) and self.compute_labels
    # Preprocessing for the global clustering.
    not_enough_centroids = False
    if isinstance(clusterer, int):
        clusterer = AgglomerativeClustering(
            n_clusters=self.n_clusters)
        # There is no need to perform the global clustering step.
        if len(centroids) < self.n_clusters:
            not_enough_centroids = True
    elif (clusterer is not None and not
          hasattr(clusterer, 'fit_predict')):
        raise ValueError("n_clusters should be an instance of "
                         "ClusterMixin or an int")
    # To use in predict to avoid recalculation.
    self._subcluster_norms = row_norms(
        self.subcluster_centers_, squared=True)
    if clusterer is None or not_enough_centroids:
        # Fall back to one label per subcluster.
        self.subcluster_labels_ = np.arange(len(centroids))
        if not_enough_centroids:
            warnings.warn(
                "Number of subclusters found (%d) by Birch is less "
                "than (%d). Decrease the threshold."
                % (len(centroids), self.n_clusters))
    else:
        # The global clustering step that clusters the subclusters of
        # the leaves. It assumes the centroids of the subclusters as
        # samples and finds the final centroids.
        self.subcluster_labels_ = clusterer.fit_predict(
            self.subcluster_centers_)
    if compute_labels:
        self.labels_ = self.predict(X)
|
cubells/l10n-spain | refs/heads/12.0 | l10n_es_aeat_sii/models/product_product.py | 2 | # Copyright 2017 MINORISA (http://www.minorisa.net)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class ProductTemplate(models.Model):
    """Extend product templates for Spanish SII VAT reporting.

    Adds the AEAT exemption-cause code a product carries when it appears
    on VAT-exempt invoice lines reported through the SII.
    """
    _inherit = "product.template"
    # AEAT exemption causes E1..E6 ('none' = not exempt).  Each code maps
    # to the legal article that justifies the exemption; the labels are
    # the official Spanish descriptions and must not be altered.
    sii_exempt_cause = fields.Selection(
        string="SII Exempt Cause",
        selection=[('none', 'None'),
                   ('E1', '[E1] Art. 20: Operaciones interiores exentas'),
                   ('E2', '[E2] Art. 21: Exenciones en las exportaciones de '
                          'bienes'),
                   ('E3', '[E3] Art. 22: Exenciones en las operaciones '
                          'asimiladas a las exportaciones'),
                   ('E4', '[E4] Art. 23 y 24: Exenciones relativas a '
                          'regímenes aduaneros y fiscales. Exenciones zonas '
                          'francas, depósitos francos y otros depósitos.'),
                   ('E5', '[E5] Art. 25: Exenciones en las entregas de bienes '
                          'destinados a otro estado miembro.'),
                   ('E6', '[E6] Otros')],
    )
|
lfontana/BrewsBrothersChillerFrontEnd | refs/heads/master | node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | 1284 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
# Expansions handed back to gyp core for the variables that .gyp files may
# reference.  (Platform-dependent entries such as SHARED_LIB_SUFFIX are
# presumably filled in elsewhere, e.g. CalculateVariables -- not visible in
# this chunk.)
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_PREFIX': 'lib',
  # Gyp expects the following variables to be expandable by the build
  # system to the appropriate locations.  Ninja prefers paths to be
  # known at gyp time.  To resolve this, introduce special
  # variables starting with $! and $| (which begin with a $ so gyp knows it
  # should be treated specially, but is otherwise an invalid
  # ninja/shell variable) that are passed to gyp here but expanded
  # before writing out into the target .ninja files; see
  # ExpandSpecial.
  # $! is used for variables that represent a path and that can only appear at
  # the start of a string, while $| is used for variables that can appear
  # anywhere in a string.
  'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
  'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
  'PRODUCT_DIR': '$!PRODUCT_DIR',
  'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
  # Special variables that may be used by gyp 'rule' targets.
  # We generate definitions for these variables on the fly when processing a
  # rule.
  'RULE_INPUT_ROOT': '${root}',
  'RULE_INPUT_DIRNAME': '${dirname}',
  'RULE_INPUT_PATH': '${source}',
  'RULE_INPUT_EXT': '${ext}',
  'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
# Host/target cross-compiles get separate toolsets when requested via the
# environment; see gyp.common.CrossCompileRequested.
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
  """Return |arg| with a leading |prefix| removed; |arg| unchanged if it
  does not start with |prefix|."""
  if not arg.startswith(prefix):
    return arg
  return arg[len(prefix):]
def QuoteShellArgument(arg, flavor):
  """Quote a string such that it will be interpreted as a single argument
  by the shell."""
  # Whitelist of characters that never need quoting; enumerating every
  # unsafe shell character would be fragile, so anything else is wrapped.
  if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
    return arg  # Safe as-is, no quoting necessary.
  if flavor == 'win':
    return gyp.msvs_emulation.QuoteForRspFile(arg)
  # POSIX shells: for each embedded "'", close the single-quoted region,
  # emit a double-quoted quote, and reopen it.
  return "'" + arg.replace("'", "'\"'\"'") + "'"
def Define(d, flavor):
  """Takes a preprocessor define and returns a -D parameter that's ninja- and
  shell-escaped."""
  if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocessor
    # definitions for some reason. Octal-encode to work around that.
    d = d.replace('#', '\\%03o' % ord('#'))
  escaped = ninja_syntax.escape('-D' + d)
  return QuoteShellArgument(escaped, flavor)
def AddArch(output, arch):
  """Adds an arch string to an output path."""
  # foo/bar.o + x64 -> foo/bar.x64.o (extension stays last).
  root, ext = os.path.splitext(output)
  return '%s.%s%s' % (root, arch, ext)
class Target(object):
  """Bookkeeping for the paths produced while building one gyp target.

  Building a target conceptually proceeds through up to four phases:

    1) actions/rules/copies generate sources/resources/etc.,
    2) compile steps produce object files,
    3) a link step produces the binary (library/executable),
    4) a bundling step merges the above into a mac bundle.

  Any phase may be absent.  A dependent target usually only needs the
  final output of this chain, but some build edges must reach inside
  the box (e.g. linking against the static library produced by phase
  3), so the concrete per-phase paths are stored in simple attributes
  and derived values like "the last output" are computed by methods.
  """
  def __init__(self, type):
    # Gyp type ("static_library", etc.) of this target.
    self.type = type
    # Stamp for the input dependencies that must complete before any
    # dependent action may run.
    self.preaction_stamp = None
    # Stamp for the input dependencies that must complete before any
    # dependent compile may run.
    self.precompile_stamp = None
    # Stamp marking completion of this target's actions/rules/copies.
    self.actions_stamp = None
    # Path produced by the link step, if any.
    self.binary = None
    # Path marking completion of the mac bundle build, if any.
    self.bundle = None
    # Windows incremental linking links against the .objs that compose a
    # .lib rather than the .lib itself; those .objs, plus the compile
    # deps needed to build them, are recorded here so that a target
    # depending directly on the .objs can depend on them too.
    self.component_objs = None
    self.compile_deps = None
    # Windows only: dependents link against the import .lib (never the
    # .dll), so it is tracked separately from |binary|.
    self.import_lib = None

  def Linkable(self):
    """Return true if this is a target that can be linked against."""
    return self.type in ('static_library', 'shared_library')

  def UsesToc(self, flavor):
    """Return true if the target should produce a restat rule based on a TOC
    file."""
    # A bundle's TOC would naively end up inside the bundle, so bundles
    # are excluded for now; Windows never uses TOC files.
    if self.bundle or flavor == 'win':
      return False
    return self.type in ('shared_library', 'loadable_module')

  def PreActionInput(self, flavor):
    """Return the path, if any, that should be used as a dependency of
    any dependent action step."""
    if self.UsesToc(flavor):
      return self.FinalOutput() + '.TOC'
    return self.FinalOutput() or self.preaction_stamp

  def PreCompileInput(self):
    """Return the path, if any, that should be used as a dependency of
    any dependent compile step."""
    return self.actions_stamp or self.precompile_stamp

  def FinalOutput(self):
    """Return the last output of the target, which depends on all prior
    steps."""
    return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
class NinjaWriter(object):
def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
             output_file, toplevel_build, output_file_name, flavor,
             toplevel_dir=None):
  """
  hash_for_rules: hash suffix appended to rule names to keep them unique
  target_outputs: map of qualified target name -> Target object
  base_dir: path from source root to directory containing this gyp file,
            by gyp semantics, all input paths are relative to this
  build_dir: path from source root to build output
  toplevel_dir: path to the toplevel directory
  """
  self.hash_for_rules = hash_for_rules
  self.target_outputs = target_outputs
  self.base_dir = base_dir
  self.build_dir = build_dir
  # Writer accumulating the generated ninja text for this target.
  self.ninja = ninja_syntax.Writer(output_file)
  self.toplevel_build = toplevel_build
  self.output_file_name = output_file_name
  self.flavor = flavor
  self.abs_build_dir = None
  if toplevel_dir is not None:
    self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
                                                      build_dir))
  self.obj_ext = '.obj' if flavor == 'win' else '.o'
  if flavor == 'win':
    # See docstring of msvs_emulation.GenerateEnvironmentFiles().
    self.win_env = {}
    for arch in ('x86', 'x64'):
      self.win_env[arch] = 'environment.' + arch
  # Relative path from build output dir to base dir.
  build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
  self.build_to_base = os.path.join(build_to_top, base_dir)
  # Relative path from base dir to build dir.
  base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
  self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
  """Expand specials like $!PRODUCT_DIR in |path|.

  If |product_dir| is None, assumes the cwd is already the product
  dir.  Otherwise, |product_dir| is the relative path to the product
  dir.
  """
  PRODUCT_DIR = '$!PRODUCT_DIR'
  if PRODUCT_DIR in path:
    if product_dir:
      path = path.replace(PRODUCT_DIR, product_dir)
    else:
      # The product dir is the cwd: strip the variable together with a
      # following separator (either style); the separator-suffixed
      # replacements must run first so a bare $!PRODUCT_DIR maps to '.'.
      path = path.replace(PRODUCT_DIR + '/', '')
      path = path.replace(PRODUCT_DIR + '\\', '')
      path = path.replace(PRODUCT_DIR, '.')
  INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
  if INTERMEDIATE_DIR in path:
    int_dir = self.GypPathToUniqueOutput('gen')
    # GypPathToUniqueOutput generates a path relative to the product dir,
    # so insert product_dir in front if it is provided.
    path = path.replace(INTERMEDIATE_DIR,
                        os.path.join(product_dir or '', int_dir))
  CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
  path = path.replace(CONFIGURATION_NAME, self.config_name)
  return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
  """Substitute the per-source rule placeholders (${root}, ${dirname},
  ${source}, ${ext}, ${name}) in |path|; also expands VS macros on
  Windows."""
  if self.flavor == 'win':
    path = self.msvs_settings.ConvertVSMacros(
        path, config=self.config_name)
  # Table-driven replacement; the placeholder spellings come from
  # generator_default_variables so they stay in sync with gyp core.
  substitutions = (
      ('RULE_INPUT_ROOT', root),
      ('RULE_INPUT_DIRNAME', dirname),
      ('RULE_INPUT_PATH', source),
      ('RULE_INPUT_EXT', ext),
      ('RULE_INPUT_NAME', name),
  )
  for key, value in substitutions:
    path = path.replace(generator_default_variables[key], value)
  return path
def GypPathToNinja(self, path, env=None):
  """Translate a gyp path to a ninja path, optionally expanding environment
  variable references in |path| with |env|.

  See the above discourse on path conversions."""
  if env:
    if self.flavor == 'mac':
      path = gyp.xcode_emulation.ExpandEnvVars(path, env)
    elif self.flavor == 'win':
      path = gyp.msvs_emulation.ExpandMacros(path, env)
  if path.startswith('$!'):
    # Paths anchored at a special dir ($!PRODUCT_DIR/...) are already
    # relative to the build dir once expanded.
    expanded = self.ExpandSpecial(path)
    if self.flavor == 'win':
      expanded = os.path.normpath(expanded)
    return expanded
  if '$|' in path:
    path = self.ExpandSpecial(path)
  assert '$' not in path, path
  # Ordinary gyp paths are relative to the .gyp file; rebase them onto
  # the build directory.
  return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
  """Translate a gyp path to a ninja path for writing output.

  If qualified is True, qualify the resulting filename with the name
  of the target.  This is necessary when e.g. compiling the same
  path twice for two separate output targets.

  See the above discourse on path conversions."""
  path = self.ExpandSpecial(path)
  assert not path.startswith('$'), path
  # Translate the path following this scheme:
  #   Input: foo/bar.gyp, target targ, references baz/out.o
  #   Output: obj/foo/baz/targ.out.o (if qualified)
  #           obj/foo/baz/out.o (otherwise)
  #     (and obj.host instead of obj for cross-compiles)
  #
  # Why this scheme and not some other one?
  # 1) for a given input, you can compute all derived outputs by matching
  #    its path, even if the input is brought via a gyp file with '..'.
  # 2) simple files like libraries and stamps have a simple filename.
  # 'obj' (or 'obj.<toolset>' for non-target toolsets) roots every
  # intermediate output for this toolset.
  obj = 'obj'
  if self.toolset != 'target':
    obj += '.' + self.toolset
  path_dir, path_basename = os.path.split(path)
  assert not os.path.isabs(path_dir), (
      "'%s' can not be absolute path (see crbug.com/462153)." % path_dir)
  if qualified:
    path_basename = self.name + '.' + path_basename
  return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
                                       path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
  """Given a list of targets, return a path for a single file
  representing the result of building all the targets or None.

  Uses a stamp file if necessary."""
  # NOTE: Python 2 semantics -- filter() returns a list here, so this
  # asserts that |targets| contains no falsy entries.
  assert targets == filter(None, targets), targets
  if len(targets) == 0:
    assert not order_only
    return None
  if len(targets) > 1 or order_only:
    # Collapse several deps into one stamp edge so dependents need only
    # a single input.
    stamp = self.GypPathToUniqueOutput(name + '.stamp')
    targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
    self.ninja.newline()
  return targets[0]
def _SubninjaNameForArch(self, arch):
  """Name of the per-arch subninja file, derived from the main output
  file name (used when building fat mac binaries)."""
  base, _ = os.path.splitext(self.output_file_name)
  return '%s.%s.ninja' % (base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
  """The main entry point for NinjaWriter: write the build rules for a spec.

  Returns a Target object, which represents the output paths for this spec.
  Returns None if there are no outputs (e.g. a settings-only 'none' type
  target)."""
  self.config_name = config_name
  self.name = spec['target_name']
  self.toolset = spec['toolset']
  config = spec['configurations'][config_name]
  self.target = Target(spec['type'])
  self.is_standalone_static_library = bool(
      spec.get('standalone_static_library', 0))
  # Track if this target contains any C++ files, to decide if gcc or g++
  # should be used for linking.
  self.uses_cpp = False
  self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
  self.xcode_settings = self.msvs_settings = None
  if self.flavor == 'mac':
    self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
  if self.flavor == 'win':
    self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
                                                         generator_flags)
    # Point the per-target compiler/assembler variables at the
    # arch-specific toolchain bindings ($cl_x86 / $cl_x64 etc.).
    arch = self.msvs_settings.GetArch(config_name)
    self.ninja.variable('arch', self.win_env[arch])
    self.ninja.variable('cc', '$cl_' + arch)
    self.ninja.variable('cxx', '$cl_' + arch)
    self.ninja.variable('cc_host', '$cl_' + arch)
    self.ninja.variable('cxx_host', '$cl_' + arch)
    self.ninja.variable('asm', '$ml_' + arch)
  if self.flavor == 'mac':
    self.archs = self.xcode_settings.GetActiveArchs(config_name)
    if len(self.archs) > 1:
      # Fat binaries get one subninja writer per arch.
      self.arch_subninjas = dict(
          (arch, ninja_syntax.Writer(
              OpenOutput(os.path.join(self.toplevel_build,
                                      self._SubninjaNameForArch(arch)),
                         'w')))
          for arch in self.archs)
  # Compute predepends for all rules.
  # actions_depends is the dependencies this target depends on before running
  # any of its action/rule/copy steps.
  # compile_depends is the dependencies this target depends on before running
  # any of its compile steps.
  actions_depends = []
  compile_depends = []
  # TODO(evan): it is rather confusing which things are lists and which
  # are strings. Fix these.
  if 'dependencies' in spec:
    for dep in spec['dependencies']:
      if dep in self.target_outputs:
        target = self.target_outputs[dep]
        actions_depends.append(target.PreActionInput(self.flavor))
        compile_depends.append(target.PreCompileInput())
    # (Python 2 filter(): drops None entries and returns a list.)
    actions_depends = filter(None, actions_depends)
    compile_depends = filter(None, compile_depends)
    actions_depends = self.WriteCollapsedDependencies('actions_depends',
                                                      actions_depends)
    compile_depends = self.WriteCollapsedDependencies('compile_depends',
                                                      compile_depends)
    self.target.preaction_stamp = actions_depends
    self.target.precompile_stamp = compile_depends
  # Write out actions, rules, and copies.  These must happen before we
  # compile any sources, so compute a list of predependencies for sources
  # while we do it.
  extra_sources = []
  mac_bundle_depends = []
  self.target.actions_stamp = self.WriteActionsRulesCopies(
      spec, extra_sources, actions_depends, mac_bundle_depends)
  # If we have actions/rules/copies, we depend directly on those, but
  # otherwise we depend on dependent target's actions/rules/copies etc.
  # We never need to explicitly depend on previous target's link steps,
  # because no compile ever depends on them.
  compile_depends_stamp = (self.target.actions_stamp or compile_depends)
  # Write out the compilation steps, if any.
  link_deps = []
  sources = extra_sources + spec.get('sources', [])
  if sources:
    if self.flavor == 'mac' and len(self.archs) > 1:
      # Write subninja file containing compile and link commands scoped to
      # a single arch if a fat binary is being built.
      for arch in self.archs:
        self.ninja.subninja(self._SubninjaNameForArch(arch))
    pch = None
    if self.flavor == 'win':
      gyp.msvs_emulation.VerifyMissingSources(
          sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
      pch = gyp.msvs_emulation.PrecompiledHeader(
          self.msvs_settings, config_name, self.GypPathToNinja,
          self.GypPathToUniqueOutput, self.obj_ext)
    else:
      pch = gyp.xcode_emulation.MacPrefixHeader(
          self.xcode_settings, self.GypPathToNinja,
          lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
    link_deps = self.WriteSources(
        self.ninja, config_name, config, sources, compile_depends_stamp, pch,
        spec)
    # Some actions/rules output 'sources' that are already object files.
    obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
    if obj_outputs:
      if self.flavor != 'mac' or len(self.archs) == 1:
        link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
      else:
        print "Warning: Actions/rules writing object files don't work with " \
            "multiarch targets, dropping. (target %s)" % spec['target_name']
  elif self.flavor == 'mac' and len(self.archs) > 1:
    # Empty fat targets still need the per-arch link_deps shape.
    link_deps = collections.defaultdict(list)
  compile_deps = self.target.actions_stamp or actions_depends
  if self.flavor == 'win' and self.target.type == 'static_library':
    self.target.component_objs = link_deps
    self.target.compile_deps = compile_deps
  # Write out a link step, if needed.
  output = None
  is_empty_bundle = not link_deps and not mac_bundle_depends
  if link_deps or self.target.actions_stamp or actions_depends:
    output = self.WriteTarget(spec, config_name, config, link_deps,
                              compile_deps)
    if self.is_mac_bundle:
      mac_bundle_depends.append(output)
  # Bundle all of the above together, if needed.
  if self.is_mac_bundle:
    output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
  if not output:
    return None
  assert self.target.FinalOutput(), output
  return self.target
def _WinIdlRule(self, source, prebuild, outputs):
  """Handle the implicit VS .idl rule for one source file. Fills |outputs|
  with files that are generated."""
  outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
      source, self.config_name)
  outdir = self.GypPathToNinja(outdir)
  def fix_path(path, rel=None):
    # Anchor |path| under the idl output directory, expand the rule
    # placeholders (${root} etc.) relative to |source|, and optionally
    # re-relativize the result against |rel|.
    path = os.path.join(outdir, path)
    dirname, basename = os.path.split(source)
    root, ext = os.path.splitext(basename)
    path = self.ExpandRuleVariables(
        path, root, dirname, source, ext, basename)
    if rel:
      path = os.path.relpath(path, rel)
    return path
  vars = [(name, fix_path(value, outdir)) for name, value in vars]
  output = [fix_path(p) for p in output]
  vars.append(('outdir', outdir))
  vars.append(('idlflags', flags))
  input = self.GypPathToNinja(source)
  self.ninja.build(output, 'idl', input,
                   variables=vars, order_only=prebuild)
  outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
  """Writes rules to match MSVS's implicit idl handling."""
  assert self.flavor == 'win'
  # Targets with their own idl rules/actions opt out of the implicit one.
  if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
    return []
  outputs = []
  for source in spec['sources']:
    if not source.endswith('.idl'):
      continue
    self._WinIdlRule(source, prebuild, outputs)
  return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
                            mac_bundle_depends):
  """Write out the Actions, Rules, and Copies steps.  Return a path
  representing the outputs of these steps."""
  outputs = []
  if self.is_mac_bundle:
    # Copy so that the rules below can consume entries without mutating
    # the spec.
    mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
  else:
    mac_bundle_resources = []
  extra_mac_bundle_resources = []
  if 'actions' in spec:
    outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
                                 extra_mac_bundle_resources)
  if 'rules' in spec:
    outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
                               mac_bundle_resources,
                               extra_mac_bundle_resources)
  if 'copies' in spec:
    outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
  if 'sources' in spec and self.flavor == 'win':
    outputs += self.WriteWinIdlFiles(spec, prebuild)
  # Collapse everything into one stamp so dependents need a single input.
  stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
  if self.is_mac_bundle:
    xcassets = self.WriteMacBundleResources(
        extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
    partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
    self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
  return stamp
def GenerateDescription(self, verb, message, fallback):
  """Generate and return a description of a build step.

  |verb| is the short summary, e.g. ACTION or RULE.
  |message| is a hand-written description, or None if not available.
  |fallback| is the gyp-level name of the step, usable as a fallback.
  """
  # Non-default toolsets (e.g. host) are called out in the verb.
  if self.toolset != 'target':
    verb += '(%s)' % self.toolset
  if message:
    return '%s %s' % (verb, self.ExpandSpecial(message))
  return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
                 extra_mac_bundle_resources):
  """Write one ninja rule plus one build edge per gyp action.

  Returns the list of every output path produced.  Outputs flagged
  process_outputs_as_sources / ..._as_mac_bundle_resources are also
  appended to |extra_sources| / |extra_mac_bundle_resources| in place.
  """
  # Actions cd into the base directory.
  env = self.GetToolchainEnv()
  all_outputs = []
  for action in actions:
    # First write out a rule for the action.
    # hash_for_rules keeps rule names unique across gyp files.
    name = '%s_%s' % (action['action_name'], self.hash_for_rules)
    description = self.GenerateDescription('ACTION',
                                           action.get('message', None),
                                           name)
    is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
                 if self.flavor == 'win' else False)
    args = action['action']
    depfile = action.get('depfile', None)
    if depfile:
      depfile = self.ExpandSpecial(depfile, self.base_to_build)
    pool = 'console' if int(action.get('ninja_use_console', 0)) else None
    rule_name, _ = self.WriteNewNinjaRule(name, args, description,
                                          is_cygwin, env, pool,
                                          depfile=depfile)
    inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
    if int(action.get('process_outputs_as_sources', False)):
      extra_sources += action['outputs']
    if int(action.get('process_outputs_as_mac_bundle_resources', False)):
      extra_mac_bundle_resources += action['outputs']
    outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
    # Then write out an edge using the rule.
    self.ninja.build(outputs, rule_name, inputs,
                     order_only=prebuild)
    all_outputs += outputs
    self.ninja.newline()
  return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
               mac_bundle_resources, extra_mac_bundle_resources):
  """Write ninja rules and per-source build edges for gyp 'rules'.

  Returns the list of all generated outputs.  |extra_sources|,
  |mac_bundle_resources| and |extra_mac_bundle_resources| are mutated
  in place as outputs are classified.
  """
  env = self.GetToolchainEnv()
  all_outputs = []
  for rule in rules:
    # Skip a rule with no action and no inputs.
    if 'action' not in rule and not rule.get('rule_sources', []):
      continue
    # First write out a rule for the rule action.
    name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
    args = rule['action']
    description = self.GenerateDescription(
        'RULE',
        rule.get('message', None),
        ('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
    is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
                 if self.flavor == 'win' else False)
    pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
    rule_name, args = self.WriteNewNinjaRule(
        name, args, description, is_cygwin, env, pool)
    # TODO: if the command references the outputs directly, we should
    # simplify it to just use $out.
    # Rules can potentially make use of some special variables which
    # must vary per source file.
    # Compute the list of variables we'll need to provide.
    special_locals = ('source', 'root', 'dirname', 'ext', 'name')
    needed_variables = set(['source'])
    for argument in args:
      for var in special_locals:
        if '${%s}' % var in argument:
          needed_variables.add(var)
    def cygwin_munge(path):
      # pylint: disable=cell-var-from-loop
      # Closes over the per-rule is_cygwin flag above.
      if is_cygwin:
        return path.replace('\\', '/')
      return path
    inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
    # If there are n source files matching the rule, and m additional rule
    # inputs, then adding 'inputs' to each build edge written below will
    # write m * n inputs. Collapsing reduces this to m + n.
    sources = rule.get('rule_sources', [])
    num_inputs = len(inputs)
    if prebuild:
      num_inputs += 1
    if num_inputs > 2 and len(sources) > 2:
      # The collapsed stamp already carries the prebuild ordering, so
      # prebuild is cleared for the per-source edges below.
      inputs = [self.WriteCollapsedDependencies(
          rule['rule_name'], inputs, order_only=prebuild)]
      prebuild = []
    # For each source file, write an edge that generates all the outputs.
    for source in sources:
      source = os.path.normpath(source)
      dirname, basename = os.path.split(source)
      root, ext = os.path.splitext(basename)
      # Gather the list of inputs and outputs, expanding $vars if possible.
      outputs = [self.ExpandRuleVariables(o, root, dirname,
                                          source, ext, basename)
                 for o in rule['outputs']]
      if int(rule.get('process_outputs_as_sources', False)):
        extra_sources += outputs
      was_mac_bundle_resource = source in mac_bundle_resources
      if was_mac_bundle_resource or \
          int(rule.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += outputs
        # Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
        # items in a set and remove them all in a single pass if this becomes
        # a performance issue.
        if was_mac_bundle_resource:
          mac_bundle_resources.remove(source)
      extra_bindings = []
      for var in needed_variables:
        if var == 'root':
          extra_bindings.append(('root', cygwin_munge(root)))
        elif var == 'dirname':
          # '$dirname' is a parameter to the rule action, which means
          # it shouldn't be converted to a Ninja path. But we don't
          # want $!PRODUCT_DIR in there either.
          dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
          extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
        elif var == 'source':
          # '$source' is a parameter to the rule action, which means
          # it shouldn't be converted to a Ninja path. But we don't
          # want $!PRODUCT_DIR in there either.
          source_expanded = self.ExpandSpecial(source, self.base_to_build)
          extra_bindings.append(('source', cygwin_munge(source_expanded)))
        elif var == 'ext':
          extra_bindings.append(('ext', ext))
        elif var == 'name':
          extra_bindings.append(('name', cygwin_munge(basename)))
        else:
          # Unreachable: every member of special_locals is handled above;
          # reaching this always-failing assert means the set was corrupted.
          assert var == None, repr(var)
      outputs = [self.GypPathToNinja(o, env) for o in outputs]
      if self.flavor == 'win':
        # WriteNewNinjaRule uses unique_name for creating an rsp file on win.
        extra_bindings.append(('unique_name',
                               hashlib.md5(outputs[0]).hexdigest()))
      self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
                       implicit=inputs,
                       order_only=prebuild,
                       variables=extra_bindings)
      all_outputs.extend(outputs)
  return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
  """Write one 'copy' build edge per file of each gyp 'copies' entry.

  Returns the list of destination paths written.
  """
  outputs = []
  env = self.GetToolchainEnv()
  for copy in copies:
    for path in copy['files']:
      # Normalize the path so trailing slashes don't confuse us.
      path = os.path.normpath(path)
      basename = os.path.split(path)[1]
      src = self.GypPathToNinja(path, env)
      dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
                                env)
      outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
      if self.is_mac_bundle:
        # gyp has mac_bundle_resources to copy things into a bundle's
        # Resources folder, but there's no built-in way to copy files to other
        # places in the bundle. Hence, some targets use copies for this. Check
        # if this file is copied into the current bundle, and if so add it to
        # the bundle depends so that dependent targets get rebuilt if the copy
        # input changes.
        if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
          mac_bundle_depends.append(dst)
  return outputs
  def WriteMacBundleResources(self, resources, bundle_depends):
    """Writes ninja edges for 'mac_bundle_resources'.

    Non-.xcassets resources are copied into the bundle via the mac_tool
    helper and appended to |bundle_depends|; .xcassets catalogs are
    collected and returned so the caller can compile them in one actool
    invocation (see WriteMacXCassets).
    """
    xcassets = []
    for output, res in gyp.xcode_emulation.GetMacBundleResources(
        generator_default_variables['PRODUCT_DIR'],
        self.xcode_settings, map(self.GypPathToNinja, resources)):
      output = self.ExpandSpecial(output)
      if os.path.splitext(output)[-1] != '.xcassets':
        # Plain resource: one mac_tool copy-bundle-resource edge per file.
        isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
        self.ninja.build(output, 'mac_tool', res,
                         variables=[('mactool_cmd', 'copy-bundle-resource'), \
                                    ('binary', isBinary)])
        bundle_depends.append(output)
      else:
        # Asset catalogs are batched and handled separately.
        xcassets.append(res)
    return xcassets
  def WriteMacXCassets(self, xcassets, bundle_depends):
    """Writes ninja edges for 'mac_bundle_resources' .xcassets files.

    This adds an invocation of 'actool' via the 'mac_tool.py' helper script.
    It assumes that the assets catalogs define at least one imageset and
    thus an Assets.car file will be generated in the application resources
    directory. If this is not the case, then the build will probably be done
    at each invocation of ninja.

    Returns the path of the partial Info.plist generated by actool (to be
    merged into the bundle's Info.plist), or None if no actool arguments
    required one.
    """
    if not xcassets:
      return
    # Map XCASSETS_* build settings onto actool command-line argument names.
    extra_arguments = {}
    settings_to_arg = {
        'XCASSETS_APP_ICON': 'app-icon',
        'XCASSETS_LAUNCH_IMAGE': 'launch-image',
    }
    settings = self.xcode_settings.xcode_settings[self.config_name]
    # NOTE: iteritems is Python 2 only; this file targets Python 2.
    for settings_key, arg_name in settings_to_arg.iteritems():
      value = settings.get(settings_key)
      if value:
        extra_arguments[arg_name] = value
    # Only ask actool for a partial Info.plist when it has something to
    # record (app icon / launch image names).
    partial_info_plist = None
    if extra_arguments:
      partial_info_plist = self.GypPathToUniqueOutput(
          'assetcatalog_generated_info.plist')
      extra_arguments['output-partial-info-plist'] = partial_info_plist
    outputs = []
    outputs.append(
        os.path.join(
            self.xcode_settings.GetBundleResourceFolder(),
            'Assets.car'))
    if partial_info_plist:
      outputs.append(partial_info_plist)
    # The extra arguments are passed to mac_tool.py as a JSON blob.
    keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
    extra_env = self.xcode_settings.GetPerTargetSettings()
    env = self.GetSortedXcodeEnv(additional_settings=extra_env)
    env = self.ComputeExportEnvString(env)
    bundle_depends.extend(self.ninja.build(
        outputs, 'compile_xcassets', xcassets,
        variables=[('env', env), ('keys', keys)]))
    return partial_info_plist
  def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
    """Write build rules for bundle Info.plist files.

    Optionally preprocesses the plist (when INFOPLIST_PREPROCESS defines
    exist), merges in |partial_info_plist| produced by actool, then copies
    the result into the bundle and appends it to |bundle_depends|.
    """
    info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
        generator_default_variables['PRODUCT_DIR'],
        self.xcode_settings, self.GypPathToNinja)
    if not info_plist:
      return
    out = self.ExpandSpecial(out)
    if defines:
      # Create an intermediate file to store preprocessed results.
      intermediate_plist = self.GypPathToUniqueOutput(
          os.path.basename(info_plist))
      defines = ' '.join([Define(d, self.flavor) for d in defines])
      info_plist = self.ninja.build(
          intermediate_plist, 'preprocess_infoplist', info_plist,
          variables=[('defines',defines)])
    env = self.GetSortedXcodeEnv(additional_settings=extra_env)
    env = self.ComputeExportEnvString(env)
    if partial_info_plist:
      # Merge the actool-generated partial plist into the main one.
      intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
      info_plist = self.ninja.build(
          intermediate_plist, 'merge_infoplist',
          [partial_info_plist, info_plist])
    # Extra plist keys are passed to mac_tool.py as a JSON blob.
    keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
    keys = QuoteShellArgument(json.dumps(keys), self.flavor)
    isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
    self.ninja.build(out, 'copy_infoplist', info_plist,
                     variables=[('env', env), ('keys', keys),
                                ('binary', isBinary)])
    bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
self.ninja.variable('nm', '$nm_host')
self.ninja.variable('readelf', '$readelf_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
  def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
                          predepends, precompiled_header, spec, arch=None):
    """Write build rules to compile all of |sources|.

    Emits per-target compiler-flag variables into |ninja_file|, then one
    build edge per compilable source.  Returns the list of object outputs.
    |arch| is only set for per-arch subninjas of multi-arch mac builds.
    """
    # Gather compiler flags per flavor: Xcode settings on mac, MSVS settings
    # on win, plain gyp config keys elsewhere.
    extra_defines = []
    if self.flavor == 'mac':
      cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
      cflags_c = self.xcode_settings.GetCflagsC(config_name)
      cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
      # ObjC/ObjC++ flags extend the plain C/C++ flags via ninja variable
      # references.
      cflags_objc = ['$cflags_c'] + \
                    self.xcode_settings.GetCflagsObjC(config_name)
      cflags_objcc = ['$cflags_cc'] + \
                     self.xcode_settings.GetCflagsObjCC(config_name)
    elif self.flavor == 'win':
      asmflags = self.msvs_settings.GetAsmflags(config_name)
      cflags = self.msvs_settings.GetCflags(config_name)
      cflags_c = self.msvs_settings.GetCflagsC(config_name)
      cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
      extra_defines = self.msvs_settings.GetComputedDefines(config_name)
      # See comment at cc_command for why there's two .pdb files.
      pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
          config_name, self.ExpandSpecial)
      if not pdbpath_c:
        # No explicit pdb name configured: derive one from the obj dir and
        # target name.
        obj = 'obj'
        if self.toolset != 'target':
          obj += '.' + self.toolset
        pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
        pdbpath_c = pdbpath + '.c.pdb'
        pdbpath_cc = pdbpath + '.cc.pdb'
      self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
      self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
      self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
    else:
      cflags = config.get('cflags', [])
      cflags_c = config.get('cflags_c', [])
      cflags_cc = config.get('cflags_cc', [])
      # Respect environment variables related to build, but target-specific
      # flags can still override them.
      if self.toolset == 'target':
        cflags_c = (os.environ.get('CPPFLAGS', '').split() +
                    os.environ.get('CFLAGS', '').split() + cflags_c)
        cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
                     os.environ.get('CXXFLAGS', '').split() + cflags_cc)
      elif self.toolset == 'host':
        cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
                    os.environ.get('CFLAGS_host', '').split() + cflags_c)
        cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
                     os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
    defines = config.get('defines', []) + extra_defines
    self.WriteVariableList(ninja_file, 'defines',
                           [Define(d, self.flavor) for d in defines])
    if self.flavor == 'win':
      self.WriteVariableList(ninja_file, 'asmflags',
                             map(self.ExpandSpecial, asmflags))
      self.WriteVariableList(ninja_file, 'rcflags',
          [QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
           for f in self.msvs_settings.GetRcflags(config_name,
                                                  self.GypPathToNinja)])
    include_dirs = config.get('include_dirs', [])
    env = self.GetToolchainEnv()
    if self.flavor == 'win':
      include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
                                                          config_name)
    self.WriteVariableList(ninja_file, 'includes',
        [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
         for i in include_dirs])
    if self.flavor == 'win':
      midl_include_dirs = config.get('midl_include_dirs', [])
      midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
          midl_include_dirs, config_name)
      self.WriteVariableList(ninja_file, 'midl_includes',
          [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
           for i in midl_include_dirs])
    pch_commands = precompiled_header.GetPchBuildCommands(arch)
    if self.flavor == 'mac':
      # Most targets use no precompiled headers, so only write these if needed.
      for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
                       ('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
        include = precompiled_header.GetInclude(ext, arch)
        if include: ninja_file.variable(var, include)
    arflags = config.get('arflags', [])
    self.WriteVariableList(ninja_file, 'cflags',
                           map(self.ExpandSpecial, cflags))
    self.WriteVariableList(ninja_file, 'cflags_c',
                           map(self.ExpandSpecial, cflags_c))
    self.WriteVariableList(ninja_file, 'cflags_cc',
                           map(self.ExpandSpecial, cflags_cc))
    if self.flavor == 'mac':
      self.WriteVariableList(ninja_file, 'cflags_objc',
                             map(self.ExpandSpecial, cflags_objc))
      self.WriteVariableList(ninja_file, 'cflags_objcc',
                             map(self.ExpandSpecial, cflags_objcc))
    self.WriteVariableList(ninja_file, 'arflags',
                           map(self.ExpandSpecial, arflags))
    ninja_file.newline()
    outputs = []
    has_rc_source = False
    # One compile edge per source, dispatched by file extension.
    for source in sources:
      filename, ext = os.path.splitext(source)
      ext = ext[1:]
      obj_ext = self.obj_ext
      if ext in ('cc', 'cpp', 'cxx'):
        command = 'cxx'
        self.uses_cpp = True
      elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
        command = 'cc'
      elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
        command = 'cc_s'
      elif (self.flavor == 'win' and ext == 'asm' and
            not self.msvs_settings.HasExplicitAsmRules(spec)):
        command = 'asm'
        # Add the _asm suffix as msvs is capable of handling .cc and
        # .asm files of the same name without collision.
        obj_ext = '_asm.obj'
      elif self.flavor == 'mac' and ext == 'm':
        command = 'objc'
      elif self.flavor == 'mac' and ext == 'mm':
        command = 'objcxx'
        self.uses_cpp = True
      elif self.flavor == 'win' and ext == 'rc':
        command = 'rc'
        obj_ext = '.res'
        has_rc_source = True
      else:
        # Ignore unhandled extensions.
        continue
      input = self.GypPathToNinja(source)
      output = self.GypPathToUniqueOutput(filename + obj_ext)
      if arch is not None:
        output = AddArch(output, arch)
      implicit = precompiled_header.GetObjDependencies([input], [output], arch)
      variables = []
      if self.flavor == 'win':
        variables, output, implicit = precompiled_header.GetFlagsModifications(
            input, output, implicit, command, cflags_c, cflags_cc,
            self.ExpandSpecial)
      ninja_file.build(output, command, input,
                       implicit=[gch for _, _, gch in implicit],
                       order_only=predepends, variables=variables)
      outputs.append(output)
    if has_rc_source:
      # rc steps get their own include list (falls back to include_dirs).
      resource_include_dirs = config.get('resource_include_dirs', include_dirs)
      self.WriteVariableList(ninja_file, 'resource_includes',
          [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
           for i in resource_include_dirs])
    self.WritePchTargets(ninja_file, pch_commands)
    ninja_file.newline()
    return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
  def WriteLink(self, spec, config_name, config, link_deps):
    """Write out a link step. Fills out target.binary.

    For multi-arch mac builds, links each arch separately and then combines
    the thin binaries with lipo (or 'solipo' for shared libraries, which
    also produces a .TOC file).  Returns the linked binary path.
    """
    if self.flavor != 'mac' or len(self.archs) == 1:
      return self.WriteLinkForArch(
          self.ninja, spec, config_name, config, link_deps)
    else:
      output = self.ComputeOutput(spec)
      # Per-arch thin binaries, each linked in its own subninja.
      inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
                                      config_name, config, link_deps[arch],
                                      arch=arch)
                for arch in self.archs]
      extra_bindings = []
      build_output = output
      if not self.is_mac_bundle:
        self.AppendPostbuildVariable(extra_bindings, spec, output, output)
      # TODO(yyanagisawa): more work needed to fix:
      # https://code.google.com/p/gyp/issues/detail?id=411
      if (spec['type'] in ('shared_library', 'loadable_module') and
          not self.is_mac_bundle):
        extra_bindings.append(('lib', output))
        self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
            variables=extra_bindings)
      else:
        self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
      return output
  def WriteLinkForArch(self, ninja_file, spec, config_name, config,
                       link_deps, arch=None):
    """Write out a link step. Fills out target.binary.

    Collects linkable/implicit dependencies, assembles per-flavor ldflags,
    library dirs and libraries, then emits a single link edge.  Returns the
    linked binary path (even when extra outputs such as import libs, pdbs
    or .TOC files are produced alongside it).
    """
    command = {
      'executable': 'link',
      'loadable_module': 'solink_module',
      'shared_library': 'solink',
    }[spec['type']]
    command_suffix = ''
    implicit_deps = set()
    solibs = set()
    order_deps = set()
    if 'dependencies' in spec:
      # Two kinds of dependencies:
      # - Linkable dependencies (like a .a or a .so): add them to the link line.
      # - Non-linkable dependencies (like a rule that generates a file
      #   and writes a stamp file): add them to implicit_deps
      extra_link_deps = set()
      for dep in spec['dependencies']:
        target = self.target_outputs.get(dep)
        if not target:
          continue
        linkable = target.Linkable()
        if linkable:
          new_deps = []
          if (self.flavor == 'win' and
              target.component_objs and
              self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
            # Link against the dependency's object files directly.
            new_deps = target.component_objs
            if target.compile_deps:
              order_deps.add(target.compile_deps)
          elif self.flavor == 'win' and target.import_lib:
            new_deps = [target.import_lib]
          elif target.UsesToc(self.flavor):
            # Depend on the .TOC so relinks only happen when the interface
            # of the shared library changes, not on every rebuild of it.
            solibs.add(target.binary)
            implicit_deps.add(target.binary + '.TOC')
          else:
            new_deps = [target.binary]
          for new_dep in new_deps:
            if new_dep not in extra_link_deps:
              extra_link_deps.add(new_dep)
              link_deps.append(new_dep)
        final_output = target.FinalOutput()
        if not linkable or final_output != target.binary:
          implicit_deps.add(final_output)
    extra_bindings = []
    if self.uses_cpp and self.flavor != 'win':
      # Any C++ object in the target forces linking with the C++ driver.
      extra_bindings.append(('ld', '$ldxx'))
    output = self.ComputeOutput(spec, arch)
    if arch is None and not self.is_mac_bundle:
      self.AppendPostbuildVariable(extra_bindings, spec, output, output)
    is_executable = spec['type'] == 'executable'
    # The ldflags config key is not used on mac or win. On those platforms
    # linker flags are set via xcode_settings and msvs_settings, respectively.
    env_ldflags = os.environ.get('LDFLAGS', '').split()
    if self.flavor == 'mac':
      ldflags = self.xcode_settings.GetLdflags(config_name,
          self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
          self.GypPathToNinja, arch)
      ldflags = env_ldflags + ldflags
    elif self.flavor == 'win':
      manifest_base_name = self.GypPathToUniqueOutput(
          self.ComputeOutputFileName(spec))
      ldflags, intermediate_manifest, manifest_files = \
          self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
                                        self.ExpandSpecial, manifest_base_name,
                                        output, is_executable,
                                        self.toplevel_build)
      ldflags = env_ldflags + ldflags
      self.WriteVariableList(ninja_file, 'manifests', manifest_files)
      implicit_deps = implicit_deps.union(manifest_files)
      if intermediate_manifest:
        self.WriteVariableList(
            ninja_file, 'intermediatemanifest', [intermediate_manifest])
      command_suffix = _GetWinLinkRuleNameSuffix(
          self.msvs_settings.IsEmbedManifest(config_name))
      def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
      if def_file:
        implicit_deps.add(def_file)
    else:
      # Respect environment variables related to build, but target-specific
      # flags can still override them.
      ldflags = env_ldflags + config.get('ldflags', [])
      if is_executable and len(solibs):
        # Executables that link against shared libraries get an rpath so
        # they can find them at runtime next to the build output.
        rpath = 'lib/'
        if self.toolset != 'target':
          rpath += self.toolset
        ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
        ldflags.append('-Wl,-rpath-link=%s' % rpath)
    self.WriteVariableList(ninja_file, 'ldflags',
                           map(self.ExpandSpecial, ldflags))
    library_dirs = config.get('library_dirs', [])
    if self.flavor == 'win':
      library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
                      for l in library_dirs]
      library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
                                                       self.flavor)
                      for l in library_dirs]
    else:
      library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
                                         self.flavor)
                      for l in library_dirs]
    libraries = gyp.common.uniquer(map(self.ExpandSpecial,
                                       spec.get('libraries', [])))
    if self.flavor == 'mac':
      libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
    elif self.flavor == 'win':
      libraries = self.msvs_settings.AdjustLibraries(libraries)
    self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
    linked_binary = output
    if command in ('solink', 'solink_module'):
      extra_bindings.append(('soname', os.path.split(output)[1]))
      extra_bindings.append(('lib',
                            gyp.common.EncodePOSIXShellArgument(output)))
      if self.flavor != 'win':
        link_file_list = output
        if self.is_mac_bundle:
          # 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
          # 'Dependency Framework.framework.rsp'
          link_file_list = self.xcode_settings.GetWrapperName()
        if arch:
          link_file_list += '.' + arch
        link_file_list += '.rsp'
        # If an rspfile contains spaces, ninja surrounds the filename with
        # quotes around it and then passes it to open(), creating a file with
        # quotes in its name (and when looking for the rsp file, the name
        # makes it through bash which strips the quotes) :-/
        link_file_list = link_file_list.replace(' ', '_')
        extra_bindings.append(
          ('link_file_list',
            gyp.common.EncodePOSIXShellArgument(link_file_list)))
      if self.flavor == 'win':
        extra_bindings.append(('binary', output))
        if ('/NOENTRY' not in ldflags and
            not self.msvs_settings.GetNoImportLibrary(config_name)):
          self.target.import_lib = output + '.lib'
          extra_bindings.append(('implibflag',
                                 '/IMPLIB:%s' % self.target.import_lib))
        pdbname = self.msvs_settings.GetPDBName(
            config_name, self.ExpandSpecial, output + '.pdb')
        output = [output, self.target.import_lib]
        if pdbname:
          output.append(pdbname)
      elif not self.is_mac_bundle:
        output = [output, output + '.TOC']
      else:
        command = command + '_notoc'
    elif self.flavor == 'win':
      extra_bindings.append(('binary', output))
      pdbname = self.msvs_settings.GetPDBName(
          config_name, self.ExpandSpecial, output + '.pdb')
      if pdbname:
        output = [output, pdbname]
    if len(solibs):
      extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
    ninja_file.build(output, command + command_suffix, link_deps,
                     implicit=list(implicit_deps),
                     order_only=list(order_deps),
                     variables=extra_bindings)
    return linked_binary
  def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
    """Writes the final build step for the target (link/archive/stamp).

    Fills out self.target.binary and returns it.  'none' targets (and
    targets with nothing to link) reuse |compile_deps| as a fake binary;
    static libraries are archived with alink/alink_thin; everything else
    goes through WriteLink.
    """
    # The 'if' clause runs before .get().Linkable(), so the lookup is safe.
    extra_link_deps = any(self.target_outputs.get(dep).Linkable()
                          for dep in spec.get('dependencies', [])
                          if dep in self.target_outputs)
    if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
      # TODO(evan): don't call this function for 'none' target types, as
      # it doesn't do anything, and we fake out a 'binary' with a stamp file.
      self.target.binary = compile_deps
      self.target.type = 'none'
    elif spec['type'] == 'static_library':
      self.target.binary = self.ComputeOutput(spec)
      if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
          self.is_standalone_static_library):
        # Thin archives are faster to produce; not used for standalone
        # libraries or on flavors whose tools don't support them.
        self.ninja.build(self.target.binary, 'alink_thin', link_deps,
                         order_only=compile_deps)
      else:
        variables = []
        if self.xcode_settings:
          libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
          if libtool_flags:
            variables.append(('libtool_flags', libtool_flags))
        if self.msvs_settings:
          libflags = self.msvs_settings.GetLibFlags(config_name,
                                                    self.GypPathToNinja)
          variables.append(('libflags', libflags))
        if self.flavor != 'mac' or len(self.archs) == 1:
          self.AppendPostbuildVariable(variables, spec,
                                       self.target.binary, self.target.binary)
          self.ninja.build(self.target.binary, 'alink', link_deps,
                           order_only=compile_deps, variables=variables)
        else:
          # Multi-arch mac: archive each arch, then combine into a fat .a.
          inputs = []
          for arch in self.archs:
            output = self.ComputeOutput(spec, arch)
            self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
                                            order_only=compile_deps,
                                            variables=variables)
            inputs.append(output)
          # TODO: It's not clear if libtool_flags should be passed to the alink
          # call that combines single-arch .a files into a fat .a file.
          self.AppendPostbuildVariable(variables, spec,
                                       self.target.binary, self.target.binary)
          self.ninja.build(self.target.binary, 'alink', inputs,
                           # FIXME: test proving order_only=compile_deps isn't
                           # needed.
                           variables=variables)
    else:
      self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
    return self.target.binary
  def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
    """Writes the final bundling step for a mac bundle target.

    Frameworks (shared_library/loadable_module) get a 'package_framework'
    edge that sets up the Versions symlink structure; everything else
    (including empty bundles, which get a '.stamp' output) is just stamped.
    Fills out self.target.bundle and returns the bundle output.
    """
    assert self.is_mac_bundle
    package_framework = spec['type'] in ('shared_library', 'loadable_module')
    output = self.ComputeMacBundleOutput()
    if is_empty:
      output += '.stamp'
    variables = []
    self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
                                 is_command_start=not package_framework)
    if package_framework and not is_empty:
      variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
      self.ninja.build(output, 'package_framework', mac_bundle_depends,
                       variables=variables)
    else:
      self.ninja.build(output, 'stamp', mac_bundle_depends,
                       variables=variables)
    self.target.bundle = output
    return output
def GetToolchainEnv(self, additional_settings=None):
"""Returns the variables toolchain would set for build steps."""
env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
if self.flavor == 'win':
env = self.GetMsvsToolchainEnv(
additional_settings=additional_settings)
return env
def GetMsvsToolchainEnv(self, additional_settings=None):
"""Returns the variables Visual Studio would set for build steps."""
return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
  def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
    """Returns a shell command that runs all the postbuilds, and removes
    |output| if any of them fails. If |is_command_start| is False, then the
    returned string will start with ' && '."""
    # Postbuilds only exist in the Xcode emulation; nothing to do otherwise.
    if not self.xcode_settings or spec['type'] == 'none' or not output:
      return ''
    output = QuoteShellArgument(output, self.flavor)
    postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
    if output_binary is not None:
      # Add implicit postbuilds (e.g. stripping) that operate on the binary.
      postbuilds = self.xcode_settings.AddImplicitPostbuilds(
          self.config_name,
          os.path.normpath(os.path.join(self.base_to_build, output)),
          QuoteShellArgument(
              os.path.normpath(os.path.join(self.base_to_build, output_binary)),
              self.flavor),
          postbuilds, quiet=True)
    if not postbuilds:
      return ''
    # Postbuilds expect to be run in the gyp file's directory, so insert an
    # implicit postbuild to cd to there.
    postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
        ['cd', self.build_to_base]))
    env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
    # G will be non-null if any postbuild fails. Run all postbuilds in a
    # subshell.
    commands = env + ' (' + \
        ' && '.join([ninja_syntax.escape(command) for command in postbuilds])
    command_string = (commands + '); G=$$?; '
                      # Remove the final output if any postbuild failed.
                      '((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
    if is_command_start:
      return '(' + command_string + ' && '
    else:
      # Leading '$ ' is a ninja-escaped space so the fragment concatenates
      # cleanly after the main command in the ninja file.
      return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar;'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
  def ComputeOutput(self, spec, arch=None):
    """Compute the path for the final output of the spec.

    Applies, in order: MSVS output-name overrides, Xcode executable paths,
    explicit 'product_dir', per-arch intermediate dirs, and the per-type
    output-root / lib-dir placement policy.
    """
    type = spec['type']
    if self.flavor == 'win':
      # An MSVS OutputFile override wins over everything else.
      override = self.msvs_settings.GetOutputName(self.config_name,
                                                  self.ExpandSpecial)
      if override:
        return override
    if arch is None and self.flavor == 'mac' and type in (
        'static_library', 'executable', 'shared_library', 'loadable_module'):
      filename = self.xcode_settings.GetExecutablePath()
    else:
      filename = self.ComputeOutputFileName(spec, type)
    if arch is None and 'product_dir' in spec:
      path = os.path.join(spec['product_dir'], filename)
      return self.ExpandSpecial(path)
    # Some products go into the output root, libraries go into shared library
    # dir, and everything else goes into the normal place.
    type_in_output_root = ['executable', 'loadable_module']
    if self.flavor == 'mac' and self.toolset == 'target':
      type_in_output_root += ['shared_library', 'static_library']
    elif self.flavor == 'win' and self.toolset == 'target':
      type_in_output_root += ['shared_library']
    if arch is not None:
      # Make sure partial executables don't end up in a bundle or the regular
      # output directory.
      archdir = 'arch'
      if self.toolset != 'target':
        archdir = os.path.join('arch', '%s' % self.toolset)
      return os.path.join(archdir, AddArch(filename, arch))
    elif type in type_in_output_root or self.is_standalone_static_library:
      return filename
    elif type == 'shared_library':
      libdir = 'lib'
      if self.toolset != 'target':
        libdir = os.path.join('lib', '%s' % self.toolset)
      return os.path.join(libdir, filename)
    else:
      return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
  def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
                        depfile=None):
    """Write out a new ninja "rule" statement for a given command.

    Returns the name of the new rule, and a copy of |args| with variables
    expanded."""
    # Expand generator-specific macros in the command and description first.
    if self.flavor == 'win':
      args = [self.msvs_settings.ConvertVSMacros(
                  arg, self.base_to_build, config=self.config_name)
              for arg in args]
      description = self.msvs_settings.ConvertVSMacros(
          description, config=self.config_name)
    elif self.flavor == 'mac':
      # |env| is an empty list on non-mac.
      args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
      description = gyp.xcode_emulation.ExpandEnvVars(description, env)
    # TODO: we shouldn't need to qualify names; we do it because
    # currently the ninja rule namespace is global, but it really
    # should be scoped to the subninja.
    rule_name = self.name
    if self.toolset == 'target':
      rule_name += '.' + self.toolset
    rule_name += '.' + name
    # Ninja rule names may only contain [a-zA-Z0-9_]; sanitize the rest.
    rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
    # Remove variable references, but not if they refer to the magic rule
    # variables. This is not quite right, as it also protects these for
    # actions, not just for rules where they are valid. Good enough.
    protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
    protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
    description = re.sub(protect + r'\$', '_', description)
    # gyp dictates that commands are run from the base directory.
    # cd into the directory before running, and adjust paths in
    # the arguments to point to the proper locations.
    rspfile = None
    rspfile_content = None
    args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
    if self.flavor == 'win':
      # On win the (possibly long) command line goes into an rsp file,
      # executed through gyp-win-tool's action-wrapper.
      rspfile = rule_name + '.$unique_name.rsp'
      # The cygwin case handles this inside the bash sub-shell.
      run_in = '' if is_cygwin else ' ' + self.build_to_base
      if is_cygwin:
        rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
            args, self.build_to_base)
      else:
        rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
      command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
                 rspfile + run_in)
    else:
      env = self.ComputeExportEnvString(env)
      command = gyp.common.EncodePOSIXShellList(args)
      command = 'cd %s; ' % self.build_to_base + env + command
    # GYP rules/actions express being no-ops by not touching their outputs.
    # Avoid executing downstream dependencies in this case by specifying
    # restat=1 to ninja.
    self.ninja.rule(rule_name, command, description, depfile=depfile,
                    restat=True, pool=pool,
                    rspfile=rspfile, rspfile_content=rspfile_content)
    self.ninja.newline()
    return rule_name, args
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Mutates |default_variables| in place with per-flavor OS names, library
  prefixes/suffixes and output dirs, and copies generator configuration
  shared with the Xcode/MSVS generators into module globals.
  """
  global generator_additional_non_configuration_keys
  global generator_additional_path_sections
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Ninja generator.
    import gyp.generator.xcode as xcode_generator
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
  elif flavor == 'win':
    exts = gyp.MSVSUtil.TARGET_TYPE_EXT
    default_variables.setdefault('OS', 'win')
    default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
    default_variables['STATIC_LIB_PREFIX'] = ''
    default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
    default_variables['SHARED_LIB_PREFIX'] = ''
    default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'lib'))
    default_variables.setdefault('LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
  """Returns the path from the toplevel_dir to the build output directory."""
  opts = params['options']
  # Where make-style generators put their build files; '.' when unset.  Using
  # the same location makes migrating from make to ninja easier (ninja itself
  # doesn't put anything here).
  gen_dir = os.path.relpath(opts.generator_output or '.')
  # Build directory relative to gen_dir; defaults to "out".
  out_dir = params.get('generator_flags', {}).get('output_dir', 'out')
  # e.g. "out" — relative path from the source root to our output files.
  return os.path.normpath(os.path.join(gen_dir, out_dir))
def CalculateGeneratorInputInfo(params):
  """Called by __init__ to initialize generator values based on params."""
  toplevel = params['options'].toplevel_dir
  # E.g. "<toplevel>/out/gypfiles".
  out_dir = os.path.join(toplevel, ComputeOutputDir(params), 'gypfiles')
  global generator_filelist_paths
  generator_filelist_paths = {
      'toplevel': toplevel,
      'qualified_out_dir': os.path.normpath(out_dir),
  }
def OpenOutput(path, mode='w'):
  """Open |path| for writing, creating directories if necessary."""
  # Make sure the parent directory chain exists so open() cannot fail with
  # a missing-directory error when writing into a fresh build tree.
  gyp.common.EnsureDirExists(path)
  return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
  """Returns |prog| prefixed with the wrapper registered for |cmd|, if any.

  E.g. with wrappers == {'CC': 'ccache'}, ('CC', wrappers, 'gcc') yields
  'ccache gcc'; with no wrapper registered, |prog| is returned unchanged.
  """
  wrapper = wrappers.get(cmd)
  return '%s %s' % (wrapper, prog) if wrapper else prog
def GetDefaultConcurrentLinks():
  """Returns a best-guess for a number of concurrent links.

  Honors the GYP_LINK_CONCURRENCY environment variable when set; otherwise
  derives a limit from physical memory, since linking is typically
  memory-bound rather than CPU-bound.
  """
  pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
  if pool_size:
    return pool_size

  if sys.platform in ('win32', 'cygwin'):
    import ctypes

    class MEMORYSTATUSEX(ctypes.Structure):
      # Mirrors the Win32 MEMORYSTATUSEX structure for GlobalMemoryStatusEx.
      _fields_ = [
        ("dwLength", ctypes.c_ulong),
        ("dwMemoryLoad", ctypes.c_ulong),
        ("ullTotalPhys", ctypes.c_ulonglong),
        ("ullAvailPhys", ctypes.c_ulonglong),
        ("ullTotalPageFile", ctypes.c_ulonglong),
        ("ullAvailPageFile", ctypes.c_ulonglong),
        ("ullTotalVirtual", ctypes.c_ulonglong),
        ("ullAvailVirtual", ctypes.c_ulonglong),
        ("sullAvailExtendedVirtual", ctypes.c_ulonglong),
      ]

    stat = MEMORYSTATUSEX()
    stat.dwLength = ctypes.sizeof(stat)
    ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))

    # VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
    # on a 64 GB machine.  (// keeps integer semantics on both py2 and py3.)
    mem_limit = max(1, stat.ullTotalPhys // (5 * (2 ** 30)))  # total / 5GB
    hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
    return min(mem_limit, hard_cap)
  elif sys.platform.startswith('linux'):
    if os.path.exists("/proc/meminfo"):
      with open("/proc/meminfo") as meminfo:
        memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
        for line in meminfo:
          match = memtotal_re.match(line)
          if not match:
            continue
          # Allow 8Gb per link on Linux because Gold is quite memory hungry
          return max(1, int(match.group(1)) // (8 * (2 ** 20)))
    return 1
  elif sys.platform == 'darwin':
    try:
      avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
      # A static library debug build of Chromium's unit_tests takes ~2.7GB, so
      # 4GB per ld process allows for some more bloat.
      return max(1, avail_bytes // (4 * (2 ** 30)))  # total / 4GB
    except (OSError, ValueError, subprocess.CalledProcessError):
      # sysctl is missing or produced unparseable output; fall back to one
      # link at a time.  (Replaces a bare `except:` that also swallowed
      # KeyboardInterrupt/SystemExit.)
      return 1
  else:
    # TODO(scottmg): Implement this for other platforms.
    return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
  """Adds link rules for Windows platform to |master_ninja|."""
  def WrapWithManifestTool(ldcmd, out, binary_type):
    # Resource ids for manifests: 1 for executables, 2 for DLLs.
    resname = {
      'exe': '1',
      'dll': '2',
    }[binary_type]
    return ('%s gyp-win-tool link-with-manifests $arch %s '
            '%s "%s" %s $mt $rc "$intermediatemanifest" '
            '$manifests'
            % (sys.executable, embed_manifest, out, ldcmd, resname))

  suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
  use_separate_mspdbsrv = (
      int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)

  # Both DLL rules share one description and command.
  dlldesc = 'LINK%s(DLL) $binary' % suffix.upper()
  dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
            '$ld /nologo $implibflag /DLL /OUT:$binary '
            '@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
  dllcmd = WrapWithManifestTool(dllcmd, '$binary', 'dll')
  for rule_name in ('solink', 'solink_module'):
    master_ninja.rule(rule_name + suffix,
                      description=dlldesc, command=dllcmd,
                      rspfile='$binary.rsp',
                      rspfile_content='$libs $in_newline $ldflags',
                      restat=True,
                      pool='link_pool')

  # Note that ldflags goes at the end so that it has the option of
  # overriding default settings earlier in the command line.
  exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
             '$ld /nologo /OUT:$binary @$binary.rsp' %
             (sys.executable, use_separate_mspdbsrv))
  exe_cmd = WrapWithManifestTool(exe_cmd, '$binary', 'exe')
  master_ninja.rule('link' + suffix,
                    description='LINK%s $binary' % suffix.upper(),
                    command=exe_cmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$in_newline $libs $ldflags',
                    pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Writes the master build.ninja (and per-target .ninja files) for one
  configuration.

  Emits toolchain variables, compile/link rules for the detected flavor
  (win/mac/posix), then one sub-ninja per target plus phony short-name,
  empty-target and 'all' aliases.
  """
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})
  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.normpath(
      os.path.join(ComputeOutputDir(params), config_name))
  toplevel_build = os.path.join(options.toplevel_dir, build_dir)
  master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
  master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
  # Put build-time support tools in out/{config_name}.
  gyp.common.CopyTool(flavor, toplevel_build)
  # Grab make settings for CC/CXX.
  # The rules are
  # - The priority from low to high is gcc/g++, the 'make_global_settings' in
  #   gyp, the environment variable.
  # - If there is no 'make_global_settings' for CC.host/CXX.host or
  #   'CC_host'/'CXX_host' enviroment variable, cc_host/cxx_host should be set
  #   to cc/cxx.
  if flavor == 'win':
    ar = 'lib.exe'
    # cc and cxx must be set to the correct architecture by overriding with one
    # of cl_x86 or cl_x64 below.
    cc = 'UNSET'
    cxx = 'UNSET'
    ld = 'link.exe'
    ld_host = '$ld'
  else:
    ar = 'ar'
    cc = 'cc'
    cxx = 'c++'
    ld = '$cc'
    ldxx = '$cxx'
    ld_host = '$cc_host'
    ldxx_host = '$cxx_host'
  ar_host = 'ar'
  cc_host = None
  cxx_host = None
  cc_host_global_setting = None
  cxx_host_global_setting = None
  clang_cl = None
  nm = 'nm'
  nm_host = 'nm'
  readelf = 'readelf'
  readelf_host = 'readelf'
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings = data[build_file].get('make_global_settings', [])
  build_to_root = gyp.common.InvertRelativePath(build_dir,
                                                options.toplevel_dir)
  wrappers = {}
  # Apply 'make_global_settings' tool overrides; values are paths relative to
  # the source root, so rebase them from the build directory.
  for key, value in make_global_settings:
    if key == 'AR':
      ar = os.path.join(build_to_root, value)
    if key == 'AR.host':
      ar_host = os.path.join(build_to_root, value)
    if key == 'CC':
      cc = os.path.join(build_to_root, value)
      if cc.endswith('clang-cl'):
        clang_cl = cc
    if key == 'CXX':
      cxx = os.path.join(build_to_root, value)
    if key == 'CC.host':
      cc_host = os.path.join(build_to_root, value)
      cc_host_global_setting = value
    if key == 'CXX.host':
      cxx_host = os.path.join(build_to_root, value)
      cxx_host_global_setting = value
    if key == 'LD':
      ld = os.path.join(build_to_root, value)
    if key == 'LD.host':
      ld_host = os.path.join(build_to_root, value)
    if key == 'NM':
      nm = os.path.join(build_to_root, value)
    if key == 'NM.host':
      nm_host = os.path.join(build_to_root, value)
    if key == 'READELF':
      readelf = os.path.join(build_to_root, value)
    if key == 'READELF.host':
      readelf_host = os.path.join(build_to_root, value)
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
  # Support wrappers from environment variables too.
  for key, value in os.environ.iteritems():
    if key.lower().endswith('_wrapper'):
      key_prefix = key[:-len('_wrapper')]
      key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
      wrappers[key_prefix] = os.path.join(build_to_root, value)
  if flavor == 'win':
    configs = [target_dicts[qualified_target]['configurations'][config_name]
               for qualified_target in target_list]
    shared_system_includes = None
    if not generator_flags.get('ninja_use_custom_environment_files', 0):
      shared_system_includes = \
          gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
              configs, generator_flags)
    cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
        toplevel_build, generator_flags, shared_system_includes, OpenOutput)
    for arch, path in cl_paths.iteritems():
      if clang_cl:
        # If we have selected clang-cl, use that instead.
        path = clang_cl
      command = CommandWithWrapper('CC', wrappers,
                                   QuoteShellArgument(path, 'win'))
      if clang_cl:
        # Use clang-cl to cross-compile for x86 or x86_64.
        command += (' -m32' if arch == 'x86' else ' -m64')
      master_ninja.variable('cl_' + arch, command)
  # Environment variables take priority over make_global_settings (see the
  # priority comment near the top of this function).
  cc = GetEnvironFallback(['CC_target', 'CC'], cc)
  master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
  cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
  master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
  if flavor == 'win':
    master_ninja.variable('ld', ld)
    master_ninja.variable('idl', 'midl.exe')
    master_ninja.variable('ar', ar)
    master_ninja.variable('rc', 'rc.exe')
    master_ninja.variable('ml_x86', 'ml.exe')
    master_ninja.variable('ml_x64', 'ml64.exe')
    master_ninja.variable('mt', 'mt.exe')
  else:
    master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
    master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
    master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
    if flavor != 'mac':
      # Mac does not use readelf/nm for .TOC generation, so avoiding polluting
      # the master ninja with extra unused variables.
      master_ninja.variable(
          'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
      master_ninja.variable(
          'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))
  if generator_supports_multiple_toolsets:
    if not cc_host:
      cc_host = cc
    if not cxx_host:
      cxx_host = cxx
    master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
    master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
    master_ninja.variable('readelf_host',
                          GetEnvironFallback(['READELF_host'], readelf_host))
    cc_host = GetEnvironFallback(['CC_host'], cc_host)
    cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
    # The environment variable could be used in 'make_global_settings', like
    # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
    if '$(CC)' in cc_host and cc_host_global_setting:
      cc_host = cc_host_global_setting.replace('$(CC)', cc)
    if '$(CXX)' in cxx_host and cxx_host_global_setting:
      cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
    master_ninja.variable('cc_host',
                          CommandWithWrapper('CC.host', wrappers, cc_host))
    master_ninja.variable('cxx_host',
                          CommandWithWrapper('CXX.host', wrappers, cxx_host))
    if flavor == 'win':
      master_ninja.variable('ld_host', ld_host)
    else:
      master_ninja.variable('ld_host', CommandWithWrapper(
          'LINK', wrappers, ld_host))
      master_ninja.variable('ldxx_host', CommandWithWrapper(
          'LINK', wrappers, ldxx_host))
  master_ninja.newline()
  master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
  master_ninja.newline()
  # Header-dependency discovery mode understood by ninja's deps= feature.
  deps = 'msvc' if flavor == 'win' else 'gcc'
  if flavor != 'win':
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
              '$cflags_pch_c -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'cc_s',
      description='CC $out',
      command=('$cc $defines $includes $cflags $cflags_c '
              '$cflags_pch_c -c $in -o $out'))
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
              '$cflags_pch_cc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
  else:
    # TODO(scottmg) Separate pdb names is a test to see if it works around
    # http://crbug.com/142362. It seems there's a race between the creation of
    # the .pdb by the precompiled header step for .cc and the compilation of
    # .c files. This should be handled by mspdbsrv, but rarely errors out with
    #   c1xx : fatal error C1033: cannot open program database
    # By making the rules target separate pdb files this might be avoided.
    cc_command = ('ninja -t msvc -e $arch ' +
                  '-- '
                  '$cc /nologo /showIncludes /FC '
                  '@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
    cxx_command = ('ninja -t msvc -e $arch ' +
                   '-- '
                   '$cxx /nologo /showIncludes /FC '
                   '@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=cc_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_c',
      deps=deps)
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=cxx_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_cc',
      deps=deps)
    master_ninja.rule(
      'idl',
      description='IDL $in',
      command=('%s gyp-win-tool midl-wrapper $arch $outdir '
               '$tlb $h $dlldata $iid $proxy $in '
               '$midl_includes $idlflags' % sys.executable))
    master_ninja.rule(
      'rc',
      description='RC $in',
      # Note: $in must be last otherwise rc.exe complains.
      command=('%s gyp-win-tool rc-wrapper '
               '$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
               sys.executable))
    master_ninja.rule(
      'asm',
      description='ASM $out',
      command=('%s gyp-win-tool asm-wrapper '
               '$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
               sys.executable))
  if flavor != 'mac' and flavor != 'win':
    master_ninja.rule(
      'alink',
      description='AR $out',
      command='rm -f $out && $ar rcs $arflags $out $in')
    master_ninja.rule(
      'alink_thin',
      description='AR $out',
      command='rm -f $out && $ar rcsT $arflags $out $in')
    # This allows targets that only need to depend on $lib's API to declare an
    # order-only dependency on $lib.TOC and avoid relinking such downstream
    # dependencies when $lib changes only in non-public ways.
    # The resulting string leaves an uninterpolated %{suffix} which
    # is used in the final substitution below.
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
        '%(solink)s && %(extract_toc)s > $lib.TOC; else '
        '%(solink)s && %(extract_toc)s > $lib.tmp && '
        'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
        'fi; fi'
        % { 'solink':
              '$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
            'extract_toc':
              ('{ $readelf -d $lib | grep SONAME ; '
               '$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
    master_ninja.rule(
      'solink',
      description='SOLINK $lib',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
      rspfile='$link_file_list',
      rspfile_content=
          '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
      rspfile='$link_file_list',
      rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out',
      command=('$ld $ldflags -o $out '
               '-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
      pool='link_pool')
  elif flavor == 'win':
    master_ninja.rule(
        'alink',
        description='LIB $out',
        command=('%s gyp-win-tool link-wrapper $arch False '
                 '$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
                 sys.executable),
        rspfile='$out.rsp',
        rspfile_content='$in_newline $libflags')
    _AddWinLinkRules(master_ninja, embed_manifest=True)
    _AddWinLinkRules(master_ninja, embed_manifest=False)
  else:
    master_ninja.rule(
      'objc',
      description='OBJC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
               '$cflags_pch_objc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'objcxx',
      description='OBJCXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
               '$cflags_pch_objcc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'alink',
      description='LIBTOOL-STATIC $out, POSTBUILDS',
      command='rm -f $out && '
              './gyp-mac-tool filter-libtool libtool $libtool_flags '
              '-static -o $out $in'
              '$postbuilds')
    master_ninja.rule(
      'lipo',
      description='LIPO $out, POSTBUILDS',
      command='rm -f $out && lipo -create $in -output $out$postbuilds')
    master_ninja.rule(
      'solipo',
      description='SOLIPO $out, POSTBUILDS',
      command=(
          'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
          '%(extract_toc)s > $lib.TOC'
          % { 'extract_toc':
                '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
                'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))
    # Record the public interface of $lib in $lib.TOC. See the corresponding
    # comment in the posix section above for details.
    solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e $lib.TOC ] || '
        # Always force dependent targets to relink if this library
        # reexports something. Handling this correctly would require
        # recursive TOC dumping but this is rare in practice, so punt.
        'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
        '%(solink)s && %(extract_toc)s > $lib.TOC; '
        'else '
        '%(solink)s && %(extract_toc)s > $lib.tmp && '
        'if ! cmp -s $lib.tmp $lib.TOC; then '
        'mv $lib.tmp $lib.TOC ; '
        'fi; '
        'fi'
        % { 'solink': solink_base,
            'extract_toc':
              '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
              'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
    solink_suffix = '@$link_file_list$postbuilds'
    master_ninja.rule(
      'solink',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_suffix,
                                              'type': '-shared'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_notoc',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_suffix,
                                              'type': '-bundle'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module_notoc',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out, POSTBUILDS',
      command=('$ld $ldflags -o $out '
               '$in $solibs $libs$postbuilds'),
      pool='link_pool')
    master_ninja.rule(
      'preprocess_infoplist',
      description='PREPROCESS INFOPLIST $out',
      command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
               'plutil -convert xml1 $out $out'))
    master_ninja.rule(
      'copy_infoplist',
      description='COPY INFOPLIST $in',
      command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
    master_ninja.rule(
      'merge_infoplist',
      description='MERGE INFOPLISTS $in',
      command='$env ./gyp-mac-tool merge-info-plist $out $in')
    master_ninja.rule(
      'compile_xcassets',
      description='COMPILE XCASSETS $in',
      command='$env ./gyp-mac-tool compile-xcassets $keys $in')
    master_ninja.rule(
      'mac_tool',
      description='MACTOOL $mactool_cmd $in',
      command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
    master_ninja.rule(
      'package_framework',
      description='PACKAGE FRAMEWORK $out, POSTBUILDS',
      command='./gyp-mac-tool package-framework $out $version$postbuilds '
              '&& touch $out')
  if flavor == 'win':
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='%s gyp-win-tool stamp $out' % sys.executable)
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
  else:
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='${postbuilds}touch $out')
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='rm -rf $out && cp -af $in $out')
  master_ninja.newline()
  all_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list,
                                        target_dicts,
                                        os.path.normpath(build_file)):
      all_targets.add(target)
  all_outputs = set()
  # target_outputs is a map from qualified target name to a Target object.
  target_outputs = {}
  # target_short_names is a map from target short name to a list of Target
  # objects.
  target_short_names = {}
  # short name of targets that were skipped because they didn't contain anything
  # interesting.
  # NOTE: there may be overlap between this an non_empty_target_names.
  empty_target_names = set()
  # Set of non-empty short target names.
  # NOTE: there may be overlap between this an empty_target_names.
  non_empty_target_names = set()
  # Write one sub-.ninja file per target, then reference it from the master
  # file via subninja.
  for qualified_target in target_list:
    # qualified_target is like: third_party/icu/icu.gyp:icui18n#target
    build_file, name, toolset = \
        gyp.common.ParseQualifiedTarget(qualified_target)
    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))
    spec = target_dicts[qualified_target]
    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
    # If build_file is a symlink, we must not follow it because there's a chance
    # it could point to a path above toplevel_dir, and we cannot correctly deal
    # with that case at the moment.
    build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
                                         False)
    qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
                                                           toolset)
    hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()
    base_path = os.path.dirname(build_file)
    obj = 'obj'
    if toolset != 'target':
      obj += '.' + toolset
    output_file = os.path.join(obj, base_path, name + '.ninja')
    ninja_output = StringIO()
    writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
                         ninja_output,
                         toplevel_build, output_file,
                         flavor, toplevel_dir=options.toplevel_dir)
    target = writer.WriteSpec(spec, config_name, generator_flags)
    if ninja_output.tell() > 0:
      # Only create files for ninja files that actually have contents.
      with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
        ninja_file.write(ninja_output.getvalue())
      ninja_output.close()
      master_ninja.subninja(output_file)
    if target:
      if name != target.FinalOutput() and spec['toolset'] == 'target':
        target_short_names.setdefault(name, []).append(target)
      target_outputs[qualified_target] = target
      if qualified_target in all_targets:
        all_outputs.add(target.FinalOutput())
        non_empty_target_names.add(name)
    else:
      empty_target_names.add(name)
  if target_short_names:
    # Write a short name to build this target. This benefits both the
    # "build chrome" case as well as the gyp tests, which expect to be
    # able to run actions and build libraries by their short name.
    master_ninja.newline()
    master_ninja.comment('Short names for targets.')
    for short_name in target_short_names:
      master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
                                               target_short_names[short_name]])
  # Write phony targets for any empty targets that weren't written yet. As
  # short names are not necessarily unique only do this for short names that
  # haven't already been output for another target.
  empty_target_names = empty_target_names - non_empty_target_names
  if empty_target_names:
    master_ninja.newline()
    master_ninja.comment('Empty targets (output for completeness).')
    for name in sorted(empty_target_names):
      master_ninja.build(name, 'phony')
  if all_outputs:
    master_ninja.newline()
    master_ninja.build('all', 'phony', list(all_outputs))
    master_ninja.default(generator_flags.get('default_target', 'all'))
  master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
  """Multiprocessing worker entry point: unpacks |arglist| and generates the
  output for a single configuration."""
  # Ignore the interrupt signal so that the parent process catches it and
  # kills all multiprocessing children.
  signal.signal(signal.SIGINT, signal.SIG_IGN)
  target_list, target_dicts, data, params, config_name = arglist
  GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generator entry point: emits ninja files for every configuration.

  Honors the 'config' generator flag to build a single configuration, and
  the 'parallel' param to fan configurations out across worker processes.
  """
  # Update target_dicts for iOS device builds.
  target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
      target_dicts)
  user_config = params.get('generator_flags', {}).get('config', None)
  if gyp.common.GetFlavor(params) == 'win':
    # Shard large targets and insert large-PDB shims before writing output.
    target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
    target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
        target_list, target_dicts, generator_default_variables)
  if user_config:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            user_config)
  else:
    config_names = target_dicts[target_list[0]]['configurations'].keys()
    if params['parallel']:
      try:
        pool = multiprocessing.Pool(len(config_names))
        arglists = []
        for config_name in config_names:
          arglists.append(
              (target_list, target_dicts, data, params, config_name))
        pool.map(CallGenerateOutputForConfig, arglists)
      # Workers ignore SIGINT (see CallGenerateOutputForConfig), so Ctrl-C
      # arrives here; tear the pool down before propagating.
      except KeyboardInterrupt, e:
        pool.terminate()
        raise e
    else:
      for config_name in config_names:
        GenerateOutputForConfig(target_list, target_dicts, data, params,
                                config_name)
|
ARM-software/CMSIS_5 | refs/heads/develop | CMSIS/NN/Scripts/NNFunctions/table_gen.py | 3 | #!/usr/bin/python
import math
class Table(object):
    """Generates C source (NNCommonTable.c) containing fixed-point lookup
    tables for the sigmoid and tanh activation functions.

    Entries are quantized to q7/q15 two's-complement values.  With the
    defaults, the table input spans roughly [-table_range/2, table_range/2).
    """
    def __init__(self, table_entry=256, table_range=8):
        # table_entry: number of entries in each generated table.
        self.table_entry = table_entry
        # table_range: scales the input values covered by the table.
        self.table_range = table_range
        pass
    def sigmoid(self, x):
        """Logistic sigmoid: 1 / (1 + e^-x)."""
        return 1 / (1 + math.exp(-1*x))
    def tanh(self, x):
        """Hyperbolic tangent expressed via exponentials."""
        return (math.exp(2*x)-1) / (math.exp(2*x)+1)
    def fp2q7(self, x):
        """Quantize float x to q7: scale by 2^7, round, saturate to
        [-128, 127], and return the unsigned two's-complement byte value."""
        x_int = math.floor(x*(2**7)+0.5)
        if x_int >= 128 :
            x_int = 127
        if x_int < -128 :
            x_int = -128
        if x_int >= 0 :
            return x_int
        else :
            # Negative values are emitted as their two's-complement byte.
            return 0x100 + x_int
    def fp2q15(self, x):
        """Quantize float x to q15: scale by 2^15, round, saturate, and
        return the unsigned two's-complement 16-bit value."""
        x_int = math.floor(x*(2**15)+0.5)
        if x_int >= 2**15 :
            x_int = 2**15-1
        if x_int < -1*2**15 :
            x_int = -1*2**15
        if x_int >= 0 :
            return x_int
        else :
            return 0x10000 + x_int
    def table_gen(self):
        """Write NNCommonTable.c with sigmoid/tanh tables.

        Emits, per activation: a unified q7 and q15 table, plus (q15 only)
        a low-range "L" table at higher resolution and a high-range "H"
        table covering the remainder of the input range.
        NOTE: relies on Python 2 integer division (self.table_entry/2 etc.).
        """
        outfile = open("NNCommonTable.c", "wb")
        outfile.write("/*\n * Common tables for NN\n *\n *\n *\n *\n */\n\n#include \"arm_math.h\"\n#include \"NNCommonTable.h\"\n\n/*\n * Table for sigmoid\n */\n")
        for function_type in ["sigmoid", "tanh"]:
            for data_type in [7, 15]:
                out_type = "q"+str(data_type)+"_t"
                act_func = getattr(self, function_type)
                quan_func = getattr(self, 'fp2q'+str(data_type))
                # unified table
                outfile.write('const %s %sTable_q%d[%d] = {\n' % (out_type, function_type, data_type, self.table_entry) )
                for i in range(self.table_entry):
                    # convert into actual value
                    # First half of the table holds non-negative inputs,
                    # second half the negative inputs (two's-complement index).
                    if i < self.table_entry/2:
                        value_q7 = self.table_range * (i)
                    else:
                        value_q7 = self.table_range * (i - self.table_entry)
                    if data_type == 7:
                        #outfile.write('%f, ' % (act_func(float(value_q7)/256)))
                        outfile.write('0x%02x, ' % (quan_func(act_func(float(value_q7)/self.table_entry))))
                    else:
                        #outfile.write('%f, ' % (act_func(float(value_q7)/256)))
                        outfile.write('0x%04x, ' % (quan_func(act_func(float(value_q7)/self.table_entry))))
                    if i % 8 == 7:
                        outfile.write("\n")
                outfile.write("};\n\n")
            for data_type in [15]:
                out_type = "q"+str(data_type)+"_t"
                act_func = getattr(self, function_type)
                quan_func = getattr(self, 'fp2q'+str(data_type))
                # H-L tables
                # L table: covers 1/4 of the input range at 4x resolution.
                outfile.write('const %s %sLTable_q%d[%d] = {\n' % (out_type, function_type, data_type, self.table_entry/2))
                for i in range(self.table_entry/2):
                    # convert into actual value, max value is 16*self.table_entry/4 / 4
                    # which is equivalent to self.table_entry / self.table_entry/2 = 2, i.e., 1/4 of 8
                    if i < self.table_entry/4:
                        value_q7 = self.table_range * i / 4
                    else:
                        value_q7 = self.table_range * (i - self.table_entry/2) / 4
                    if data_type == 7:
                        #outfile.write('%f, ' % (act_func(float(value_q7)/256)))
                        outfile.write('0x%02x, ' % (quan_func(act_func(float(value_q7)/(self.table_entry/2)))))
                    else:
                        #outfile.write('%f, ' % (act_func(float(value_q7)/256)))
                        outfile.write('0x%04x, ' % (quan_func(act_func(float(value_q7)/(self.table_entry/2)))))
                    if i % 8 == 7:
                        outfile.write("\n")
                outfile.write("};\n\n")
                # H table: covers the remaining 3/4 of the input range.
                outfile.write('const %s %sHTable_q%d[%d] = {\n' % (out_type, function_type, data_type, 3*self.table_entry/4))
                for i in range(3 * self.table_entry/4):
                    # convert into actual value, tageting range (2, 8)
                    if i < 3*self.table_entry/8 :
                        value_q7 = self.table_range * ( i + self.table_entry/8 )
                    else:
                        value_q7 = self.table_range * ( i + self.table_entry/8 - self.table_entry)
                    if data_type == 7:
                        #outfile.write('%f, ' % (act_func(float(value_q7)/256)))
                        outfile.write('0x%02x, ' % (quan_func(act_func(float(value_q7)/self.table_entry))))
                    else:
                        #outfile.write('%f, ' % (act_func(float(value_q7)/256)))
                        outfile.write('0x%04x, ' % (quan_func(act_func(float(value_q7)/self.table_entry))))
                    if i % 8 == 7:
                        outfile.write("\n")
                outfile.write("};\n\n")
        outfile.close()
if __name__ == '__main__':
    # Guarding the entry point keeps `import table_gen` side-effect free:
    # previously, importing this module wrote NNCommonTable.c immediately.
    # table_range=16 with 256 entries covers inputs in roughly [-8, 8).
    mytable = Table(table_entry=256, table_range=16)
    mytable.table_gen()
|
mikedingjan/wagtail | refs/heads/master | wagtail/contrib/routable_page/tests.py | 7 | from unittest import mock
from django.test import RequestFactory, TestCase
from django.urls.exceptions import NoReverseMatch
from wagtail.contrib.routable_page.templatetags.wagtailroutablepage_tags import routablepageurl
from wagtail.core.models import Page, Site
from wagtail.tests.routablepage.models import (
RoutablePageTest, RoutablePageWithOverriddenIndexRouteTest)
class TestRoutablePage(TestCase):
    """Exercises RoutablePageTest's subpage routing: URL resolution,
    URL reversing, and end-to-end HTTP responses."""

    model = RoutablePageTest

    def setUp(self):
        self.home_page = Page.objects.get(id=2)
        self.routable_page = self.home_page.add_child(
            instance=self.model(title="Routable Page", live=True))

    def _resolve(self, path):
        # Helper so each resolution test reads as a single unpack + compare.
        return self.routable_page.resolve_subpage(path)

    def test_resolve_index_route_view(self):
        view, args, kwargs = self._resolve('/')
        self.assertEqual(view, self.routable_page.index_route)
        self.assertEqual((args, kwargs), ((), {}))

    def test_resolve_archive_by_year_view(self):
        view, args, kwargs = self._resolve('/archive/year/2014/')
        self.assertEqual(view, self.routable_page.archive_by_year)
        self.assertEqual((args, kwargs), (('2014',), {}))

    def test_resolve_archive_by_author_view(self):
        view, args, kwargs = self._resolve('/archive/author/joe-bloggs/')
        self.assertEqual(view, self.routable_page.archive_by_author)
        self.assertEqual((args, kwargs), ((), {'author_slug': 'joe-bloggs'}))

    def test_resolve_external_view(self):
        view, args, kwargs = self._resolve('/external/joe-bloggs/')
        self.assertEqual(view, self.routable_page.external_view)
        self.assertEqual((args, kwargs), (('joe-bloggs',), {}))

    def test_resolve_external_view_other_route(self):
        view, args, kwargs = self._resolve('/external-no-arg/')
        self.assertEqual(view, self.routable_page.external_view)
        self.assertEqual((args, kwargs), ((), {}))

    def test_reverse_index_route_view(self):
        self.assertEqual(self.routable_page.reverse_subpage('index_route'), '')

    def test_reverse_archive_by_year_view(self):
        self.assertEqual(
            self.routable_page.reverse_subpage('archive_by_year', args=('2014',)),
            'archive/year/2014/')

    def test_reverse_archive_by_author_view(self):
        self.assertEqual(
            self.routable_page.reverse_subpage(
                'archive_by_author', kwargs={'author_slug': 'joe-bloggs'}),
            'archive/author/joe-bloggs/')

    def test_reverse_overridden_name(self):
        self.assertEqual(
            self.routable_page.reverse_subpage('name_overridden'),
            'override-name-test/')

    def test_reverse_overridden_name_default_doesnt_work(self):
        # Once a route's name is overridden, the method name itself must no
        # longer be reversible.
        with self.assertRaises(NoReverseMatch):
            self.routable_page.reverse_subpage('override_name_test')

    def test_reverse_external_view(self):
        self.assertEqual(
            self.routable_page.reverse_subpage('external_view', args=('joe-bloggs',)),
            'external/joe-bloggs/')

    def test_reverse_external_view_other_route(self):
        self.assertEqual(
            self.routable_page.reverse_subpage('external_view'),
            'external-no-arg/')

    def test_get_index_route_view(self):
        resp = self.client.get(self.routable_page.url)
        self.assertContains(resp, "DEFAULT PAGE TEMPLATE")

    def test_get_routable_page_with_overridden_index_route(self):
        page = self.home_page.add_child(
            instance=RoutablePageWithOverriddenIndexRouteTest(
                title="Routable Page with overridden index", live=True))
        resp = self.client.get(page.url)
        self.assertContains(resp, "OVERRIDDEN INDEX ROUTE")
        self.assertNotContains(resp, "DEFAULT PAGE TEMPLATE")

    def test_get_archive_by_year_view(self):
        resp = self.client.get(self.routable_page.url + 'archive/year/2014/')
        self.assertContains(resp, "ARCHIVE BY YEAR: 2014")

    def test_earlier_view_takes_precedence(self):
        resp = self.client.get(self.routable_page.url + 'archive/year/1984/')
        self.assertContains(resp, "we were always at war with eastasia")

    def test_get_archive_by_author_view(self):
        resp = self.client.get(
            self.routable_page.url + 'archive/author/joe-bloggs/')
        self.assertContains(resp, "ARCHIVE BY AUTHOR: joe-bloggs")

    def test_get_external_view(self):
        resp = self.client.get(self.routable_page.url + 'external/joe-bloggs/')
        self.assertContains(resp, "EXTERNAL VIEW: joe-bloggs")

    def test_get_external_view_other_route(self):
        resp = self.client.get(self.routable_page.url + 'external-no-arg/')
        self.assertContains(resp, "EXTERNAL VIEW: ARG NOT SET")

    def test_routable_page_can_have_instance_bound_descriptors(self):
        # This descriptor pretends that it does not exist in the class, hence
        # it raises an AttributeError when class bound. This is, for instance,
        # the behavior of django's FileFields; get_subpage_urls must tolerate
        # such attributes.
        class BoundOnlyDescriptor:
            def __get__(self, instance, cls=None):
                if instance is None:
                    raise AttributeError
                return 'value'

            def __set__(self, instance, value):
                raise AttributeError

        RoutablePageTest.descriptor = BoundOnlyDescriptor()
        try:
            RoutablePageTest.get_subpage_urls()
        finally:
            del RoutablePageTest.descriptor
class TestRoutablePageTemplateTag(TestCase):
def setUp(self):
self.home_page = Page.objects.get(id=2)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.request.site = Site.find_for_request(self.request)
self.context = {'request': self.request}
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, '/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, '/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, '/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, '/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = '/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
class TestRoutablePageTemplateTagForSecondSiteAtSameRoot(TestCase):
"""
When multiple sites exist on the same root page, relative URLs within that subtree should
omit the domain, in line with #4390
"""
def setUp(self):
default_site = Site.objects.get(is_default_site=True)
second_site = Site.objects.create( # add another site with the same root page
hostname='development.local',
port=default_site.port,
root_page_id=default_site.root_page_id,
)
self.home_page = Page.objects.get(id=2)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.request.site = Site.find_for_request(self.request)
self.context = {'request': self.request}
self.request.site = second_site
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, '/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, '/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, '/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, '/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = '/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
class TestRoutablePageTemplateTagForSecondSiteAtDifferentRoot(TestCase):
"""
When multiple sites exist, relative URLs between such sites should include the domain portion
"""
def setUp(self):
self.home_page = Page.objects.get(id=2)
events_page = self.home_page.add_child(instance=Page(title='Events', live=True))
second_site = Site.objects.create(
hostname='events.local',
port=80,
root_page=events_page,
)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.request.site = Site.find_for_request(self.request)
self.context = {'request': self.request}
self.request.site = second_site
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, 'http://localhost/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, 'http://localhost/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, 'http://localhost/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, 'http://localhost/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = 'http://localhost/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
|
onshape-public/onshape-clients | refs/heads/master | python/onshape_client/oas/models/bt_global_tree_node_list_response_bt_team_info.py | 1 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_team_info
except ImportError:
bt_team_info = sys.modules["onshape_client.oas.models.bt_team_info"]
class BTGlobalTreeNodeListResponseBTTeamInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"href": (str,), # noqa: E501
"items": ([bt_team_info.BTTeamInfo],), # noqa: E501
"next": (str,), # noqa: E501
"previous": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"href": "href", # noqa: E501
"items": "items", # noqa: E501
"next": "next", # noqa: E501
"previous": "previous", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_global_tree_node_list_response_bt_team_info.BTGlobalTreeNodeListResponseBTTeamInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
href (str): [optional] # noqa: E501
items ([bt_team_info.BTTeamInfo]): [optional] # noqa: E501
next (str): [optional] # noqa: E501
previous (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
antoinecarme/pyaf | refs/heads/master | tests/artificial/transf_BoxCox/trend_MovingAverage/cycle_12/ar_/test_artificial_128_BoxCox_MovingAverage_12__0.py | 1 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "BoxCox", sigma = 0.0, exog_count = 0, ar_order = 0); |
Critical-Impact/django-inplaceedit | refs/heads/master | testing/testing/test_fk/views.py | 6027 | # Create your views here.
|
pwnieexpress/pwn_plug_sources | refs/heads/master | src/metagoofil/hachoir_parser/file_system/__init__.py | 94 | from hachoir_parser.file_system.ext2 import EXT2_FS
from hachoir_parser.file_system.fat import FAT12, FAT16, FAT32
from hachoir_parser.file_system.mbr import MSDos_HardDrive
from hachoir_parser.file_system.ntfs import NTFS
from hachoir_parser.file_system.iso9660 import ISO9660
from hachoir_parser.file_system.reiser_fs import REISER_FS
from hachoir_parser.file_system.linux_swap import LinuxSwapFile
|
gtaylor/ansible | refs/heads/devel | v2/ansible/utils/debug.py | 210 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
import sys
from multiprocessing import Lock
from ansible import constants as C
global_debug_lock = Lock()
def debug(msg):
if C.DEFAULT_DEBUG:
global_debug_lock.acquire()
print("%6d %0.5f: %s" % (os.getpid(), time.time(), msg))
sys.stdout.flush()
global_debug_lock.release()
|
GustavoHennig/ansible | refs/heads/devel | lib/ansible/modules/cloud/google/gce_img.py | 70 | #!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""An Ansible module to utilize GCE image resources."""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_img
version_added: "1.9"
short_description: utilize GCE image resources
description:
- This module can create and delete GCE private images from gzipped
compressed tarball containing raw disk data or from existing detached
disks in any zone. U(https://cloud.google.com/compute/docs/images)
options:
name:
description:
- the name of the image to create or delete
required: true
default: null
description:
description:
- an optional description
required: false
default: null
family:
description:
- an optional family name
required: false
default: null
version_added: "2.2"
source:
description:
- the source disk or the Google Cloud Storage URI to create the image from
required: false
default: null
state:
description:
- desired state of the image
required: false
default: "present"
choices: ["present", "absent"]
zone:
description:
- the zone of the disk specified by source
required: false
default: "us-central1-a"
timeout:
description:
- timeout for the operation
required: false
default: 180
version_added: "2.0"
service_account_email:
description:
- service account email
required: false
default: null
pem_file:
description:
- path to the pem file associated with the service account email
required: false
default: null
project_id:
description:
- your GCE project ID
required: false
default: null
requirements:
- "python >= 2.6"
- "apache-libcloud"
author: "Tom Melendez (supertom)"
'''
EXAMPLES = '''
# Create an image named test-image from the disk 'test-disk' in zone us-central1-a.
- gce_img:
name: test-image
source: test-disk
zone: us-central1-a
state: present
# Create an image named test-image from a tarball in Google Cloud Storage.
- gce_img:
name: test-image
source: https://storage.googleapis.com/bucket/path/to/image.tgz
# Alternatively use the gs scheme
- gce_img:
name: test-image
source: gs://bucket/path/to/image.tgz
# Delete an image named test-image.
- gce_img:
name: test-image
state: absent
'''
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError
from libcloud.common.google import ResourceExistsError
from libcloud.common.google import ResourceNotFoundError
_ = Provider.GCE
has_libcloud = True
except ImportError:
has_libcloud = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect
GCS_URI = 'https://storage.googleapis.com/'
def create_image(gce, name, module):
"""Create an image with the specified name."""
source = module.params.get('source')
zone = module.params.get('zone')
desc = module.params.get('description')
timeout = module.params.get('timeout')
family = module.params.get('family')
if not source:
module.fail_json(msg='Must supply a source', changed=False)
if source.startswith(GCS_URI):
# source is a Google Cloud Storage URI
volume = source
elif source.startswith('gs://'):
# libcloud only accepts https URI.
volume = source.replace('gs://', GCS_URI)
else:
try:
volume = gce.ex_get_volume(source, zone)
except ResourceNotFoundError:
module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
changed=False)
except GoogleBaseError as e:
module.fail_json(msg=str(e), changed=False)
gce_extra_args = {}
if family is not None:
gce_extra_args['family'] = family
old_timeout = gce.connection.timeout
try:
gce.connection.timeout = timeout
gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
return True
except ResourceExistsError:
return False
except GoogleBaseError as e:
module.fail_json(msg=str(e), changed=False)
finally:
gce.connection.timeout = old_timeout
def delete_image(gce, name, module):
"""Delete a specific image resource by name."""
try:
gce.ex_delete_image(name)
return True
except ResourceNotFoundError:
return False
except GoogleBaseError as e:
module.fail_json(msg=str(e), changed=False)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
family=dict(),
description=dict(),
source=dict(),
state=dict(default='present', choices=['present', 'absent']),
zone=dict(default='us-central1-a'),
service_account_email=dict(),
pem_file=dict(type='path'),
project_id=dict(),
timeout=dict(type='int', default=180)
)
)
if not has_libcloud:
module.fail_json(msg='libcloud with GCE support is required.')
gce = gce_connect(module)
name = module.params.get('name')
state = module.params.get('state')
family = module.params.get('family')
changed = False
if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1':
module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
changed=False)
# user wants to create an image.
if state == 'present':
changed = create_image(gce, name, module)
# user wants to delete the image.
if state == 'absent':
changed = delete_image(gce, name, module)
module.exit_json(changed=changed, name=name)
if __name__ == '__main__':
main()
|
smmribeiro/intellij-community | refs/heads/master | python/testData/intentions/PyAnnotateVariableTypeIntentionTest/typeCommentLocalWithTarget.py | 38 | def func():
with open('file.txt') as var:
v<caret>ar
|
saintdragon2/python-3-lecture-2015 | refs/heads/master | civil-final/1st_presentation/11조/pygame/tests/display_test.py | 26 | if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils import test_not_implemented, unittest
else:
from test.test_utils import test_not_implemented, unittest
import pygame, pygame.transform
class DisplayModuleTest( unittest.TestCase ):
def test_update( self ):
""" see if pygame.display.update takes rects with negative values.
"|Tags:display|"
"""
if 1:
pygame.init()
screen = pygame.display.set_mode((100,100))
screen.fill((55,55,55))
r1 = pygame.Rect(0,0,100,100)
pygame.display.update(r1)
r2 = pygame.Rect(-10,0,100,100)
pygame.display.update(r2)
r3 = pygame.Rect(-10,0,-100,-100)
pygame.display.update(r3)
# NOTE: if I don't call pygame.quit there is a segfault. hrmm.
pygame.quit()
# I think it's because unittest runs stuff in threads
# here's a stack trace...
# NOTE to author of above:
# unittest doesn't run tests in threads
# segfault was probably caused by another tests need
# for a "clean slate"
"""
#0 0x08103b7c in PyFrame_New ()
#1 0x080bd666 in PyEval_EvalCodeEx ()
#2 0x08105202 in PyFunction_SetClosure ()
#3 0x080595ae in PyObject_Call ()
#4 0x080b649f in PyEval_CallObjectWithKeywords ()
#5 0x08059585 in PyObject_CallObject ()
#6 0xb7f7aa2d in initbase () from /usr/lib/python2.4/site-packages/pygame/base.so
#7 0x080e09bd in Py_Finalize ()
#8 0x08055597 in Py_Main ()
#9 0xb7e04eb0 in __libc_start_main () from /lib/tls/libc.so.6
#10 0x08054e31 in _start ()
"""
def todo_test_Info(self):
# __doc__ (as of 2008-08-02) for pygame.display.Info:
# pygame.display.Info(): return VideoInfo
# Create a video display information object
#
# Creates a simple object containing several attributes to describe
# the current graphics environment. If this is called before
# pygame.display.set_mode() some platforms can provide information
# about the default display mode. This can also be called after
# setting the display mode to verify specific display options were
# satisfied. The VidInfo object has several attributes:
#
# hw: True if the display is hardware accelerated
# wm: True if windowed display modes can be used
# video_mem: The megabytes of video memory on the display. This is 0 if unknown
# bitsize: Number of bits used to store each pixel
# bytesize: Number of bytes used to store each pixel
# masks: Four values used to pack RGBA values into pixels
# shifts: Four values used to pack RGBA values into pixels
# losses: Four values used to pack RGBA values into pixels
# blit_hw: True if hardware Surface blitting is accelerated
# blit_hw_CC: True if hardware Surface colorkey blitting is accelerated
# blit_hw_A: True if hardware Surface pixel alpha blitting is accelerated
# blit_sw: True if software Surface blitting is accelerated
# blit_sw_CC: True if software Surface colorkey blitting is accelerated
# blit_sw_A: True if software Surface pixel alpha blitting is acclerated
# current_h, current_h: Width and height of the current video mode, or of the
# desktop mode if called before the display.set_mode is called.
# (current_h, current_w are available since SDL 1.2.10, and pygame 1.8.0)
# They are -1 on error, or if an old SDL is being used.
self.fail()
if 0:
pygame.init()
inf = pygame.display.Info()
print ("before a display mode has been set")
print (inf)
self.assertNotEqual(inf.current_h, -1)
self.assertNotEqual(inf.current_w, -1)
#probably have an older SDL than 1.2.10 if -1.
screen = pygame.display.set_mode((100,100))
inf = pygame.display.Info()
print (inf)
self.assertNotEqual(inf.current_h, -1)
self.assertEqual(inf.current_h, 100)
self.assertEqual(inf.current_w, 100)
#pygame.quit()
def todo_test_flip(self):
# __doc__ (as of 2008-08-02) for pygame.display.flip:
# pygame.display.flip(): return None
# update the full display Surface to the screen
#
# This will update the contents of the entire display. If your display
# mode is using the flags pygame.HWSURFACE and pygame.DOUBLEBUF, this
# will wait for a vertical retrace and swap the surfaces. If you are
# using a different type of display mode, it will simply update the
# entire contents of the surface.
#
# When using an pygame.OPENGL display mode this will perform a gl buffer swap.
self.fail()
def todo_test_get_active(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_active:
# pygame.display.get_active(): return bool
# true when the display is active on the display
#
# After pygame.display.set_mode() is called the display Surface will
# be visible on the screen. Most windowed displays can be hidden by
# the user. If the display Surface is hidden or iconified this will
# return False.
#
self.fail()
def todo_test_get_caption(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_caption:
# pygame.display.get_caption(): return (title, icontitle)
# get the current window caption
#
# Returns the title and icontitle for the display Surface. These will
# often be the same value.
#
self.fail()
def todo_test_get_driver(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_driver:
# pygame.display.get_driver(): return name
# get the name of the pygame display backend
#
# Pygame chooses one of many available display backends when it is
# initialized. This returns the internal name used for the display
# backend. This can be used to provide limited information about what
# display capabilities might be accelerated. See the SDL_VIDEODRIVER
# flags in pygame.display.set_mode() to see some of the common
# options.
#
self.fail()
def todo_test_get_init(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_init:
# pygame.display.get_init(): return bool
# true if the display module is initialized
#
# Returns True if the pygame.display module is currently initialized.
self.fail()
def todo_test_get_surface(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_surface:
# pygame.display.get_surface(): return Surface
# get a reference to the currently set display surface
#
# Return a reference to the currently set display Surface. If no
# display mode has been set this will return None.
#
self.fail()
def todo_test_get_wm_info(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_wm_info:
# pygame.display.get_wm_info(): return dict
# Get information about the current windowing system
#
# Creates a dictionary filled with string keys. The strings and values
# are arbitrarily created by the system. Some systems may have no
# information and an empty dictionary will be returned. Most platforms
# will return a "window" key with the value set to the system id for
# the current display.
#
# New with pygame 1.7.1
self.fail()
def todo_test_gl_get_attribute(self):
# __doc__ (as of 2008-08-02) for pygame.display.gl_get_attribute:
# pygame.display.gl_get_attribute(flag): return value
# get the value for an opengl flag for the current display
#
# After calling pygame.display.set_mode() with the pygame.OPENGL flag,
# it is a good idea to check the value of any requested OpenGL
# attributes. See pygame.display.gl_set_attribute() for a list of
# valid flags.
#
self.fail()
def todo_test_gl_set_attribute(self):
# __doc__ (as of 2008-08-02) for pygame.display.gl_set_attribute:
# pygame.display.gl_set_attribute(flag, value): return None
# request an opengl display attribute for the display mode
#
# When calling pygame.display.set_mode() with the pygame.OPENGL flag,
# Pygame automatically handles setting the OpenGL attributes like
# color and doublebuffering. OpenGL offers several other attributes
# you may want control over. Pass one of these attributes as the flag,
# and its appropriate value. This must be called before
# pygame.display.set_mode()
#
# The OPENGL flags are;
# GL_ALPHA_SIZE, GL_DEPTH_SIZE, GL_STENCIL_SIZE, GL_ACCUM_RED_SIZE,
# GL_ACCUM_GREEN_SIZE, GL_ACCUM_BLUE_SIZE, GL_ACCUM_ALPHA_SIZE,
# GL_MULTISAMPLEBUFFERS, GL_MULTISAMPLESAMPLES, GL_STEREO
self.fail()
def todo_test_iconify(self):
# __doc__ (as of 2008-08-02) for pygame.display.iconify:
# pygame.display.iconify(): return bool
# iconify the display surface
#
# Request the window for the display surface be iconified or hidden.
# Not all systems and displays support an iconified display. The
# function will return True if successfull.
#
# When the display is iconified pygame.display.get_active() will
# return False. The event queue should receive a ACTIVEEVENT event
# when the window has been iconified.
#
self.fail()
def todo_test_init(self):
# __doc__ (as of 2008-08-02) for pygame.display.init:
# pygame.display.init(): return None
# initialize the display module
#
# Initializes the pygame display module. The display module cannot do
# anything until it is initialized. This is usually handled for you
# automatically when you call the higher level pygame.init().
#
# Pygame will select from one of several internal display backends
# when it is initialized. The display mode will be chosen depending on
# the platform and permissions of current user. Before the display
# module is initialized the environment variable SDL_VIDEODRIVER can
# be set to control which backend is used. The systems with multiple
# choices are listed here.
#
# Windows : windib, directx
# Unix : x11, dga, fbcon, directfb, ggi, vgl, svgalib, aalib
# On some platforms it is possible to embed the pygame display into an
# already existing window. To do this, the environment variable
# SDL_WINDOWID must be set to a string containing the window id or
# handle. The environment variable is checked when the pygame display
# is initialized. Be aware that there can be many strange side effects
# when running in an embedded display.
#
# It is harmless to call this more than once, repeated calls have no effect.
self.fail()
def todo_test_list_modes(self):
# __doc__ (as of 2008-08-02) for pygame.display.list_modes:
# pygame.display.list_modes(depth=0, flags=pygame.FULLSCREEN): return list
# get list of available fullscreen modes
#
# This function returns a list of possible dimensions for a specified
# color depth. The return value will be an empty list if no display
# modes are available with the given arguments. A return value of -1
# means that any requested resolution should work (this is likely the
# case for windowed modes). Mode sizes are sorted from biggest to
# smallest.
#
# If depth is 0, SDL will choose the current/best color depth for the
# display. The flags defaults to pygame.FULLSCREEN, but you may need
# to add additional flags for specific fullscreen modes.
#
self.fail()
def todo_test_mode_ok(self):
# __doc__ (as of 2008-08-02) for pygame.display.mode_ok:
# pygame.display.mode_ok(size, flags=0, depth=0): return depth
# pick the best color depth for a display mode
#
# This function uses the same arguments as pygame.display.set_mode().
# It is used to depermine if a requested display mode is available. It
# will return 0 if the display mode cannot be set. Otherwise it will
# return a pixel depth that best matches the display asked for.
#
# Usually the depth argument is not passed, but some platforms can
# support multiple display depths. If passed it will hint to which
# depth is a better match.
#
# The most useful flags to pass will be pygame.HWSURFACE,
# pygame.DOUBLEBUF, and maybe pygame.FULLSCREEN. The function will
# return 0 if these display flags cannot be set.
#
self.fail()
def todo_test_quit(self):
# __doc__ (as of 2008-08-02) for pygame.display.quit:
# pygame.display.quit(): return None
# uninitialize the display module
#
# This will shut down the entire display module. This means any active
# displays will be closed. This will also be handled automatically
# when the program exits.
#
# It is harmless to call this more than once, repeated calls have no effect.
self.fail()
def todo_test_set_caption(self):
# __doc__ (as of 2008-08-02) for pygame.display.set_caption:
# pygame.display.set_caption(title, icontitle=None): return None
# set the current window caption
#
# If the display has a window title, this function will change the
# name on the window. Some systems support an alternate shorter title
# to be used for minimized displays.
#
self.fail()
def todo_test_set_gamma(self):
# __doc__ (as of 2008-08-02) for pygame.display.set_gamma:
# pygame.display.set_gamma(red, green=None, blue=None): return bool
# change the hardware gamma ramps
#
# Set the red, green, and blue gamma values on the display hardware.
# If the green and blue arguments are not passed, they will both be
# the same as red. Not all systems and hardware support gamma ramps,
# if the function succeeds it will return True.
#
# A gamma value of 1.0 creates a linear color table. Lower values will
# darken the display and higher values will brighten.
#
self.fail()
def todo_test_set_gamma_ramp(self):
    """Placeholder for pygame.display.set_gamma_ramp(red, green, blue).

    Sets the gamma ramps from explicit lookup tables: each argument is a
    sequence of 256 integers in the range 0..0xffff. Returns True only
    where gamma ramps are supported.
    """
    self.fail()
def todo_test_set_icon(self):
    """Placeholder for pygame.display.set_icon(Surface).

    Sets the runtime window icon (any surface works, ~32x32 preferred;
    colorkey transparency is honored). Some systems only apply an icon
    chosen before pygame.display.set_mode() is called.
    """
    self.fail()
def todo_test_set_mode(self):
    """Placeholder for pygame.display.set_mode(resolution=(0,0), flags=0, depth=0).

    Creates the display Surface whose changes eventually appear on the
    monitor; arguments are requests and the best supported match is
    used. With SDL >= 1.2.10 a (0, 0) resolution (or a 0 width/height)
    adopts the current screen size; older SDL raises instead. depth is
    best left to default - pygame will emulate an unavailable depth,
    slowly. Fullscreen requests pick the closest compatible mode while
    the returned surface keeps the requested resolution. Flags may be
    combined with the bitwise or ("|"): FULLSCREEN, DOUBLEBUF
    (recommended for HWSURFACE or OPENGL), HWSURFACE (FULLSCREEN only),
    OPENGL, RESIZABLE, NOFRAME; 0 or no flags means a software window.
    """
    self.fail()
def todo_test_set_palette(self):
    """Placeholder for pygame.display.set_palette(palette=None).

    Changes the video display palette on 8bit displays only (not the
    palette of the display Surface itself). palette is a sequence of
    RGB triplets; None restores the system default palette.
    """
    self.fail()
def todo_test_toggle_fullscreen(self):
    """Placeholder for pygame.display.toggle_fullscreen().

    Switches between windowed and fullscreen modes; only works under
    the unix x11 video driver - usually better to call
    pygame.display.set_mode() with new display flags instead.
    """
    self.fail()
# Run this module's test cases when executed directly.
if __name__ == '__main__':
    unittest.main()
|
fedepad/espressopp | refs/heads/master | contrib/mpi4py/mpi4py-2.0.0/demo/mpe-logging/ring.py | 9 | #!/usr/bin/env python
# MPE-logged MPI ring-exchange demo: each rank swaps a large buffer with
# its ring neighbours, first via a blocking Sendrecv, then via a pair of
# nonblocking operations.
import os
os.environ['MPE_LOGFILE_PREFIX'] = 'ring'  # must be set before MPI initializes
import mpi4py
mpi4py.profile('mpe')  # route MPI calls through the MPE profiler
from mpi4py import MPI
from array import array

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

# Ring neighbours: wrap around at both ends so rank 0 receives from the
# last rank and the last rank sends to rank 0.
src = rank-1
dest = rank+1
if rank == 0:
    src = size-1
if rank == size-1:
    dest = 0

# Prefer NumPy buffers; otherwise build equally sized array.array
# buffers (1000 doubles repeated 1000 times -> 1e6 elements).
try:
    from numpy import zeros
    a1 = zeros(1000000, 'd')
    a2 = zeros(1000000, 'd')
except ImportError:
    from array import array
    a1 = array('d', [0]*1000); a1 *= 1000
    a2 = array('d', [0]*1000); a2 *= 1000

# Blocking combined send/receive around the ring.
comm.Sendrecv(sendbuf=a1, recvbuf=a2,
              source=src, dest=dest)

# The same exchange, expressed with nonblocking send/receive.
MPI.Request.Waitall([
    comm.Isend(a1, dest=dest),
    comm.Irecv(a2, source=src),
])
|
cchurch/ansible | refs/heads/devel | lib/ansible/modules/notification/irc.py | 32 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jan-Piet Mens <jpmens () gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: irc
version_added: "1.2"
short_description: Send a message to an IRC channel
description:
- Send a message to an IRC channel. This is a very simplistic implementation.
options:
server:
description:
- IRC server name/address
default: localhost
port:
description:
- IRC server port number
default: 6667
nick:
description:
- Nickname to send the message from. May be shortened, depending on server's NICKLEN setting.
default: ansible
msg:
description:
- The message body.
required: true
topic:
description:
- Set the channel topic
version_added: "2.0"
color:
description:
- Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none").
Added 11 more colors in version 2.0.
default: "none"
choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
"light_blue", "pink", "gray", "light_gray"]
channel:
description:
- Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
required: true
nick_to:
description:
- A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
version_added: "2.0"
key:
description:
- Channel key
version_added: "1.7"
passwd:
description:
- Server password
timeout:
description:
- Timeout to use while waiting for successful registration and join
messages, this is to prevent an endless loop
default: 30
version_added: "1.5"
use_ssl:
description:
- Designates whether TLS/SSL should be used when connecting to the IRC server
type: bool
default: 'no'
version_added: "1.8"
part:
description:
- Designates whether user should part from channel after sending message or not.
Useful for when using a faux bot and not wanting join/parts between messages.
type: bool
default: 'yes'
version_added: "2.0"
style:
description:
- Text style for the message. Note italic does not work on some clients
choices: [ "bold", "underline", "reverse", "italic" ]
version_added: "2.0"
# informational: requirements for nodes
requirements: [ socket ]
author:
- "Jan-Piet Mens (@jpmens)"
- "Matt Martz (@sivel)"
'''
EXAMPLES = '''
- irc:
server: irc.example.net
channel: #t1
msg: Hello world
- local_action:
module: irc
port: 6669
server: irc.example.net
channel: #t1
msg: 'All finished at {{ ansible_date_time.iso8601 }}'
color: red
nick: ansibleIRC
- local_action:
module: irc
port: 6669
server: irc.example.net
channel: #t1
nick_to:
- nick1
- nick2
msg: 'All finished at {{ ansible_date_time.iso8601 }}'
color: red
nick: ansibleIRC
'''
# ===========================================
# IRC module support methods.
#
import re
import socket
import ssl
import time
import traceback
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule
def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None,
             nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
    '''Send an (optionally colored/styled) message to an IRC channel and/or nicks.

    Connects to the server, registers the nick, joins the channel,
    optionally sets the topic, delivers the message, then optionally
    parts and quits.  Raises Exception on registration/join timeouts;
    socket errors propagate to the caller.
    '''
    nick_to = [] if nick_to is None else nick_to
    colornumbers = {
        'white': "00",
        'black': "01",
        'blue': "02",
        'green': "03",
        'red': "04",
        'brown': "05",
        'purple': "06",
        'orange': "07",
        'yellow': "08",
        'light_green': "09",
        'teal': "10",
        'light_cyan': "11",
        'light_blue': "12",
        'pink': "13",
        'gray': "14",
        'light_gray': "15",
    }

    stylechoices = {
        'bold': "\x02",
        'underline': "\x1F",
        'reverse': "\x16",
        'italic': "\x1D",
    }

    # Unknown or "none" selections simply produce no formatting codes.
    # (The original reached the same result via broad try/except around
    # dict lookups; .get() is the idiomatic and safer equivalent.)
    styletext = stylechoices.get(style, "")
    colornumber = colornumbers.get(color)
    colortext = "\x03" + colornumber if colornumber is not None else ""

    message = styletext + colortext + msg

    irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if use_ssl:
        # NOTE(review): ssl.wrap_socket() is deprecated in favor of
        # ssl.SSLContext().wrap_socket(); kept for behavioral compatibility.
        irc = ssl.wrap_socket(irc)
    try:
        irc.connect((server, int(port)))

        if passwd:
            irc.send(to_bytes('PASS %s\r\n' % passwd))
        irc.send(to_bytes('NICK %s\r\n' % nick))
        irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)))
        motd = ''
        start = time.time()
        while 1:
            motd += to_native(irc.recv(1024))
            # The server might send back a shorter nick than we specified (due to NICKLEN),
            # so grab that and use it from now on (assuming we find the 00[1-4] response).
            match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
            if match:
                nick = match.group('nick')
                break
            elif time.time() - start > timeout:
                raise Exception('Timeout waiting for IRC server welcome response')
            time.sleep(0.5)

        if key:
            irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key)))
        else:
            irc.send(to_bytes('JOIN %s\r\n' % channel))

        join = ''
        start = time.time()
        while 1:
            join += to_native(irc.recv(1024))
            if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I):
                break
            elif time.time() - start > timeout:
                raise Exception('Timeout waiting for IRC JOIN response')
            time.sleep(0.5)

        if topic is not None:
            irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic)))
            time.sleep(1)

        if nick_to:
            for nick in nick_to:
                irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (nick, message)))
        if channel:
            irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message)))
        time.sleep(1)

        if part:
            irc.send(to_bytes('PART %s\r\n' % channel))
            irc.send(to_bytes('QUIT\r\n'))
            time.sleep(1)
    finally:
        # Always release the socket: the original leaked it whenever an
        # exception (e.g. a timeout) escaped before the final close().
        irc.close()
# ===========================================
# Main
#


def main():
    """Ansible entry point: validate arguments and deliver the IRC message."""
    module = AnsibleModule(
        argument_spec=dict(
            server=dict(default='localhost'),
            port=dict(type='int', default=6667),
            nick=dict(default='ansible'),
            nick_to=dict(required=False, type='list'),
            msg=dict(required=True),
            color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
                                                                    "green", "red", "brown",
                                                                    "purple", "orange", "yellow",
                                                                    "light_green", "teal", "light_cyan",
                                                                    "light_blue", "pink", "gray",
                                                                    "light_gray", "none"]),
            style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
            channel=dict(required=False),
            key=dict(no_log=True),
            topic=dict(),
            passwd=dict(no_log=True),
            timeout=dict(type='int', default=30),
            part=dict(type='bool', default=True),
            use_ssl=dict(type='bool', default=False)
        ),
        supports_check_mode=True,
        required_one_of=[['channel', 'nick_to']]
    )

    server = module.params["server"]
    port = module.params["port"]
    nick = module.params["nick"]
    nick_to = module.params["nick_to"]
    msg = module.params["msg"]
    color = module.params["color"]
    channel = module.params["channel"]
    topic = module.params["topic"]
    # A topic can only be applied to a channel, not to direct messages.
    if topic and not channel:
        module.fail_json(msg="When topic is specified, a channel is required.")
    key = module.params["key"]
    passwd = module.params["passwd"]
    timeout = module.params["timeout"]
    use_ssl = module.params["use_ssl"]
    part = module.params["part"]
    style = module.params["style"]

    # NOTE(review): supports_check_mode=True is declared above, but
    # module.check_mode is never consulted here, so the message is sent
    # even in check mode -- confirm this is intended.
    try:
        send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
    except Exception as e:
        module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc())

    module.exit_json(changed=False, channel=channel, nick=nick,
                     msg=msg)
if __name__ == '__main__':
main()
|
Bysmyyr/chromium-crosswalk | refs/heads/master | chrome/test/ispy/server/gs_bucket.py | 88 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implementation of CloudBucket using Google Cloud Storage as the backend."""
import os
import sys
import cloudstorage
from common import cloud_bucket
class GoogleCloudStorageBucket(cloud_bucket.BaseCloudBucket):
    """Subclass of cloud_bucket.CloudBucket with actual GS commands."""

    def __init__(self, bucket):
        """Initializes the bucket.

        Args:
          bucket: the name of the bucket to connect to.
        """
        self.bucket = '/' + bucket

    def _full_path(self, path):
        # Joins bucket and path, collapsing any leading '/' on path so
        # the result never contains a double slash.
        return self.bucket + '/' + path.lstrip('/')

    # override
    def UploadFile(self, path, contents, content_type):
        """Writes |contents| to |path| with the given Content-Type."""
        gs_file = cloudstorage.open(
            self._full_path(path), 'w', content_type=content_type)
        gs_file.write(contents)
        gs_file.close()

    # override
    def DownloadFile(self, path):
        """Returns the contents of the file at |path|.

        Raises:
          Exception: any cloudstorage error, re-raised with the full path
              prepended for easier diagnosis.
        """
        try:
            gs_file = cloudstorage.open(self._full_path(path), 'r')
            r = gs_file.read()
            gs_file.close()
        except Exception as e:
            raise Exception('%s: %s' % (self._full_path(path), str(e)))
        return r

    # override
    def UpdateFile(self, path, contents):
        """Overwrites an existing file; raises FileNotFoundError if absent."""
        if not self.FileExists(path):
            raise cloud_bucket.FileNotFoundError
        gs_file = cloudstorage.open(self._full_path(path), 'w')
        gs_file.write(contents)
        gs_file.close()

    # override
    def RemoveFile(self, path):
        """Deletes the file at |path|."""
        cloudstorage.delete(self._full_path(path))

    # override
    def FileExists(self, path):
        """Returns True iff a file exists at |path|."""
        try:
            cloudstorage.stat(self._full_path(path))
        except cloudstorage.NotFoundError:
            return False
        return True

    # override
    def GetImageURL(self, path):
        """Returns an app-relative URL that serves the image at |path|."""
        return '/image?file_path=%s' % path

    # override
    def GetAllPaths(self, prefix, max_keys=None, marker=None, delimiter=None):
        """Yields bucket-relative paths of files whose names match |prefix|."""
        # Strip the leading '/<bucket>/' from each listed filename.
        return (f.filename[len(self.bucket) + 1:] for f in
                cloudstorage.listbucket(self.bucket, prefix=prefix,
                                        max_keys=max_keys, marker=marker, delimiter=delimiter))
|
paninetworks/neutron | refs/heads/master | neutron/tests/unit/agent/linux/test_bridge_lib.py | 25 | # Copyright 2015 Intel Corporation.
# Copyright 2015 Isaku Yamahata <isaku.yamahata at intel com>
# <isaku.yamahata at gmail com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.linux import bridge_lib
from neutron.tests import base
class BridgeLibTest(base.BaseTestCase):
    """A test suite to exercise the bridge libraries """
    _NAMESPACE = 'test-namespace'
    _BR_NAME = 'test-br'
    _IF_NAME = 'test-if'

    def setUp(self):
        super(BridgeLibTest, self).setUp()
        # Patch IPWrapper so no real netns commands run; capture the
        # execute() mock to assert on the brctl commands issued.
        ip_wrapper = mock.patch('neutron.agent.linux.ip_lib.IPWrapper').start()
        self.execute = ip_wrapper.return_value.netns.execute

    def _verify_bridge_mock(self, cmd):
        # Each brctl invocation must run as root; reset between checks
        # so assert_called_once_with stays meaningful.
        self.execute.assert_called_once_with(cmd, run_as_root=True)
        self.execute.reset_mock()

    def _test_br(self, namespace=None):
        """Exercises the full add/configure/teardown life cycle of a bridge."""
        br = bridge_lib.BridgeDevice.addbr(self._BR_NAME, namespace)
        self.assertEqual(namespace, br.namespace)
        self._verify_bridge_mock(['brctl', 'addbr', self._BR_NAME])

        br.setfd(0)
        self._verify_bridge_mock(['brctl', 'setfd', self._BR_NAME, '0'])

        br.disable_stp()
        self._verify_bridge_mock(['brctl', 'stp', self._BR_NAME, 'off'])

        br.addif(self._IF_NAME)
        self._verify_bridge_mock(
            ['brctl', 'addif', self._BR_NAME, self._IF_NAME])

        br.delif(self._IF_NAME)
        self._verify_bridge_mock(
            ['brctl', 'delif', self._BR_NAME, self._IF_NAME])

        br.delbr()
        self._verify_bridge_mock(['brctl', 'delbr', self._BR_NAME])

    def test_addbr_with_namespace(self):
        self._test_br(self._NAMESPACE)

    def test_addbr_without_namespace(self):
        self._test_br()
|
mezz64/home-assistant | refs/heads/dev | homeassistant/components/doorbird/switch.py | 5 | """Support for powering relays in a DoorBird video doorbell."""
import datetime
from homeassistant.components.switch import SwitchEntity
import homeassistant.util.dt as dt_util
from .const import DOMAIN, DOOR_STATION, DOOR_STATION_INFO
from .entity import DoorBirdEntity
# Sentinel relay identifier representing the doorbell's infrared light.
IR_RELAY = "__ir_light__"
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the DoorBird switch platform.

    Creates one switch entity per physical relay plus one for the
    infrared light (IR_RELAY).
    """
    config_entry_id = config_entry.entry_id

    doorstation = hass.data[DOMAIN][config_entry_id][DOOR_STATION]
    doorstation_info = hass.data[DOMAIN][config_entry_id][DOOR_STATION_INFO]

    # Build a new list instead of appending in place: the original
    # mutated the shared doorstation_info["RELAYS"] list, adding another
    # IR_RELAY entry every time the entry was set up.
    relays = [*doorstation_info["RELAYS"], IR_RELAY]

    async_add_entities(
        [DoorBirdSwitch(doorstation, doorstation_info, relay) for relay in relays]
    )
class DoorBirdSwitch(DoorBirdEntity, SwitchEntity):
    """A relay in a DoorBird device."""

    def __init__(self, doorstation, doorstation_info, relay):
        """Initialize a relay in a DoorBird device.

        relay is either a relay identifier from the device info or the
        IR_RELAY sentinel for the infrared light.
        """
        super().__init__(doorstation, doorstation_info)
        self._doorstation = doorstation
        self._relay = relay
        # Relays are momentary: we assume "on" until _assume_off passes.
        self._state = False
        self._assume_off = datetime.datetime.min

        # The IR light stays on far longer than a door relay pulse.
        if relay == IR_RELAY:
            self._time = datetime.timedelta(minutes=5)
        else:
            self._time = datetime.timedelta(seconds=5)
        self._unique_id = f"{self._mac_addr}_{self._relay}"

    @property
    def unique_id(self):
        """Switch unique id."""
        return self._unique_id

    @property
    def name(self):
        """Return the name of the switch."""
        if self._relay == IR_RELAY:
            return f"{self._doorstation.name} IR"

        return f"{self._doorstation.name} Relay {self._relay}"

    @property
    def icon(self):
        """Return the icon to display."""
        return "mdi:lightbulb" if self._relay == IR_RELAY else "mdi:dip-switch"

    @property
    def is_on(self):
        """Get the assumed state of the relay."""
        return self._state

    def turn_on(self, **kwargs):
        """Power the relay."""
        if self._relay == IR_RELAY:
            self._state = self._doorstation.device.turn_light_on()
        else:
            self._state = self._doorstation.device.energize_relay(self._relay)

        # Remember when the device will switch the relay off by itself.
        now = dt_util.utcnow()
        self._assume_off = now + self._time

    def turn_off(self, **kwargs):
        """Turn off the relays is not needed. They are time-based."""
        raise NotImplementedError("DoorBird relays cannot be manually turned off.")

    async def async_update(self):
        """Wait for the correct amount of assumed time to pass."""
        # _assume_off is timezone-aware after turn_on() while the reset
        # value datetime.min is naive; the short-circuit on _state means
        # the naive value is never actually compared against utcnow().
        if self._state and self._assume_off <= dt_util.utcnow():
            self._state = False
            self._assume_off = datetime.datetime.min
|
watchcat/cbu-rotterdam | refs/heads/master | giveaminute/projectResource.py | 4 | """
:copyright: (c) 2011 Local Projects, all rights reserved
:license: Affero GNU GPL v3, see LICENSE for more details.
"""
from framework.log import log
import helpers.censor as censor
class ProjectResource():
    """Read-only wrapper around one project_resource row (Python 2 code)."""

    def __init__(self, db, projectResourceId):
        self.id = projectResourceId
        self.db = db
        # Loaded eagerly; None when the row is missing or the query fails.
        self.data = self.populateResourceData()

    def populateResourceData(self):
        """Fetch the resource row joined with its owning user, or None."""
        sql = """select pr.project_resource_id,
                        pr.title,
                        pr.description,
                        pr.url,
                        pr.contact_name,
                        pr.contact_email,
                        pr.image_id,
                        pr.location_id,
                        pr.is_official,
                        o.user_id as owner_user_id,
                        o.first_name as owner_first_name,
                        o.last_name as owner_last_name,
                        o.email as owner_email
                from project_resource pr
                left join user o on o.user_id = pr.contact_user_id
                where pr.project_resource_id = $id;"""

        try:
            data = list(self.db.query(sql, {'id':self.id}))

            if len(data) > 0:
                return data[0]
            else:
                return None
        except Exception, e:
            log.info("*** couldn't get project resource info")
            log.error(e)
            return None

    def getFullDictionary(self):
        """Return the subset of fields exposed to API/JSON consumers."""
        data = dict(image_id = self.data.image_id,
                    project_resource_id = self.data.project_resource_id,
                    description = self.data.description,
                    title = self.data.title,
                    url = self.data.url,
                    location_id = self.data.location_id,
                    is_official = self.data.is_official)

        return data
def searchProjectResourcesCount(db, terms, locationId):
    """Count active, visible resources matching the fulltext terms/location.

    Returns 0 on any database error.
    """
    count = 0
    # '*' is appended to each term for MySQL boolean-mode prefix matching.
    match = ' '.join([(item + "*") for item in terms])

    try:
        sql = """select count(*) as count
                from project_resource
                where
                is_active = 1 and is_hidden = 0
                and ($locationId is null or location_id = $locationId)
                and ($match = '' or match(title, keywords, description) against ($match in boolean mode))"""
        data = list(db.query(sql, {'match':match, 'locationId':locationId}))

        count = data[0].count
    except Exception, e:
        log.info("*** couldn't get resources search data")
        log.error(e)

    return count
def searchProjectResources(db, terms, locationId, limit=1000, offset=0):
    """Return matching resource rows, newest first; [] on any error.

    Same filtering as searchProjectResourcesCount, plus paging via
    limit/offset.
    """
    data = []
    # '*' is appended to each term for MySQL boolean-mode prefix matching.
    match = ' '.join([(item + "*") for item in terms])

    try:
        sql = """select project_resource_id as link_id, title, url, image_id, is_official
                from project_resource
                where
                is_active = 1 and is_hidden = 0
                and ($locationId is null or location_id = $locationId)
                and ($match = '' or match(title, keywords, description) against ($match in boolean mode))
                order by created_datetime desc
                limit $limit offset $offset"""
        data = list(db.query(sql, {'match':match, 'locationId':locationId, 'limit':limit, 'offset':offset }))
    except Exception, e:
        log.info("*** couldn't get resources search data")
        log.error(e)

    return data
def updateProjectResourceImage(db, projectResourceId, imageId):
    """Point the resource at a new image; True on success, False on error."""
    try:
        db.update('project_resource', where = "project_resource_id = $id", image_id = imageId, vars = {'id':projectResourceId})
        return True
    except Exception, e:
        log.info("*** couldn't update project image")
        log.error(e)
        return False
def updateProjectResourceLocation(db, projectResourceId, locationId):
    """Move the resource to a new location; True on success, False on error."""
    try:
        db.update('project_resource', where = "project_resource_id = $id", location_id = locationId, vars = {'id':projectResourceId})
        return True
    except Exception, e:
        log.info("*** couldn't update project location")
        log.error(e)
        return False
def updateProjectResourceTextData(db, projectResourceId, field, text):
    """Set one text column and re-derive the hidden flag from the censor.

    NOTE(review): *field* is interpolated directly into the SQL string,
    so it must only ever come from trusted, hard-coded column names --
    never from user input (SQL injection risk otherwise).
    """
    # Resources containing flagged words are hidden pending admin review.
    isHidden = (censor.badwords(db, text) > 0)

    try:
        sql = "update project_resource set %s = $text, is_hidden = $isHidden where project_resource_id = $id" % field
        db.query(sql, {'id':projectResourceId, 'text':text, 'isHidden':isHidden})
        return True
    except Exception, e:
        log.info("*** couldn't update project %s" % field)
        log.error(e)
        return False
def getUnreviewedProjectResources(db, limit = 10, offset = 0):
    """Return active-but-hidden resources awaiting admin review; [] on error."""
    data = []

    try:
        sql = """select pr.project_resource_id,
                        pr.title, pr.description,
                        pr.image_id,
                        pr.location_id,
                        pr.url,
                        pr.twitter_url,
                        pr.facebook_url,
                        pr.physical_address,
                        pr.contact_name,
                        pr.contact_email,
                        replace(pr.keywords, ' ', ',') as keywords,
                        l.name as location_name
                from project_resource pr
                left join location l on l.location_id = pr.location_id
                where pr.is_active = 1 and pr.is_hidden = 1
                limit $limit offset $offset"""
        data = list(db.query(sql, {'limit':limit, 'offset':offset}))
    except Exception, e:
        log.info("*** couldn't get unreviewed resources")
        log.error(e)

    return data
def approveProjectResource(db, projectResourceId, isOfficial = False):
    """Unhide a reviewed resource, optionally marking it official.

    Returns True on success, False on any database error.
    """
    try:
        db.update('project_resource', where = "project_resource_id = $projectResourceId", is_hidden = 0, is_official = isOfficial, vars = {'projectResourceId':projectResourceId})
        return True
    except Exception, e:
        log.info("*** couldn't approve project resource %s" % projectResourceId)
        log.error(e)
        return False
return False |
GenericStudent/home-assistant | refs/heads/dev | tests/components/volumio/__init__.py | 17 | """Tests for the Volumio integration."""
|
devanlai/mbed | refs/heads/master | workspace_tools/host_tests/example/BroadcastSend.py | 128 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
from time import sleep, time

# UDP port the matching receiver-side test listens on.
BROADCAST_PORT = 58083

# UDP socket bound to an ephemeral port and enabled for broadcast.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

# Broadcast a timestamped hello once per second, forever (Python 2).
while True:
    print "Broadcasting..."
    data = 'Hello World: ' + repr(time()) + '\n'
    s.sendto(data, ('<broadcast>', BROADCAST_PORT))
    sleep(1)
|
ProgVal/cjdns | refs/heads/master | node_build/dependencies/libuv/build/gyp/test/mac/gyptest-xctest.py | 221 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xctest targets are correctly configured.
"""
import TestGyp

import sys

# xctest is macOS/Xcode-only; on other platforms the script is a no-op.
if sys.platform == 'darwin':
    test = TestGyp.TestGyp(formats=['xcode'])

    # Ignore this test if Xcode 5 is not installed
    import subprocess
    job = subprocess.Popen(['xcodebuild', '-version'],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    out, err = job.communicate()
    if job.returncode != 0:
        raise Exception('Error %d running xcodebuild' % job.returncode)
    xcode_version, build_number = out.splitlines()
    # Convert the version string from 'Xcode 5.0' to ['5','0'].
    xcode_version = xcode_version.split()[-1].split('.')
    # NOTE(review): list-of-strings comparison is lexicographic; correct
    # for single-digit majors ('4' < '5') but '10' would sort below '5'.
    if xcode_version < ['5']:
        test.pass_test()

    CHDIR = 'xctest'
    test.run_gyp('test.gyp', chdir=CHDIR)
    test.build('test.gyp', chdir=CHDIR, arguments=['-scheme', 'classes', 'test'])

    # The bundled resource must have been copied into the .xctest bundle.
    test.built_file_must_match('tests.xctest/Contents/Resources/resource.txt',
                               'foo\n', chdir=CHDIR)
    test.pass_test()
|
cansik/pg4nosql | refs/heads/master | pg4nosql/PostgresNoSQLUtil.py | 1 | import json
from pg4nosql.PostgresNoSQLResultItem import PostgresNoSQLResultItem
from psycopg2.extensions import adapt, AsIs
__author__ = 'cansik'
def to_sql_string(obj):
    """Render *obj* for SQL use: None passes through as AsIs, else str(obj)."""
    return AsIs(obj) if obj is None else str(obj)
def to_nullable_string(obj):
    """Adapt *obj* for SQL: the literal 'Null' for None, JSON-adapted for
    dict/list containers, and an adapted string for everything else."""
    if obj is None:
        return 'Null'
    if isinstance(obj, (dict, list)):
        return adapt(json.dumps(obj))
    return adapt(str(obj))
|
MiLk/youtube-dl | refs/heads/master | youtube_dl/extractor/steam.py | 2 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unescapeHTML,
)
class SteamIE(InfoExtractor):
    """Extracts trailer videos from store.steampowered.com game/video pages."""

    _VALID_URL = r"""(?x)http://store\.steampowered\.com/
        (agecheck/)?
        (?P<urltype>video|app)/ #If the page is only for videos or for a game
        (?P<gameID>\d+)/?
        (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
        """
    _VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
    # Bypasses the age gate by submitting a fixed birth date (1970-01-01).
    _AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
    _TEST = {
        "url": "http://store.steampowered.com/video/105600/",
        "playlist": [
            {
                "md5": "f870007cee7065d7c76b88f0a45ecc07",
                "info_dict": {
                    'id': '81300',
                    'ext': 'flv',
                    "title": "Terraria 1.1 Trailer",
                    'playlist_index': 1,
                }
            },
            {
                "md5": "61aaf31a5c5c3041afb58fb83cbb5751",
                "info_dict": {
                    'id': '80859',
                    'ext': 'flv',
                    "title": "Terraria Trailer",
                    'playlist_index': 2,
                }
            }
        ],
        'params': {
            'playlistend': 2,
        }
    }

    def _real_extract(self, url):
        """Return a playlist entry for every trailer on the game's video page."""
        m = re.match(self._VALID_URL, url, re.VERBOSE)
        gameID = m.group('gameID')

        videourl = self._VIDEO_PAGE_TEMPLATE % gameID
        webpage = self._download_webpage(videourl, gameID)

        # If Steam asks for a birth date, re-fetch through the agecheck URL.
        if re.search('<h2>Please enter your birth date to continue:</h2>', webpage) is not None:
            videourl = self._AGECHECK_TEMPLATE % gameID
            self.report_age_confirmation()
            webpage = self._download_webpage(videourl, gameID)

        self.report_extraction(gameID)
        game_title = self._html_search_regex(r'<h2 class="pageheader">(.*?)</h2>',
                                             webpage, 'game title')

        # Movie metadata is scattered across three parallel patterns in the
        # page; zip() below pairs the i-th url with the i-th title/thumbnail.
        mweb = re.finditer(
            r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},",
            webpage)
        titles = re.finditer(
            r'<span class="title">(?P<videoName>.+?)</span>', webpage)
        thumbs = re.finditer(
            r'<img class="movie_thumb" src="(?P<thumbnail>.+?)">', webpage)
        videos = []

        for vid, vtitle, thumb in zip(mweb, titles, thumbs):
            video_id = vid.group('videoID')
            title = vtitle.group('videoName')
            video_url = vid.group('videoURL')
            video_thumb = thumb.group('thumbnail')
            if not video_url:
                raise ExtractorError('Cannot find video url for %s' % video_id)
            videos.append({
                'id': video_id,
                'url': video_url,
                'ext': 'flv',
                'title': unescapeHTML(title),
                'thumbnail': video_thumb
            })
        return self.playlist_result(videos, gameID, game_title)
|
smart-developerr/my-first-blog | refs/heads/master | Lib/site-packages/pip/_vendor/requests/packages/chardet/constants.py | 3007 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Debug verbosity flag for the chardet modules (0 = no debug output).
_debug = 0

# Detection result states shared across the chardet modules
# (presumably returned by the probers -- names suggest: still
# detecting / positive match / ruled out).
eDetecting = 0
eFoundIt = 1
eNotMe = 2

# Coding state machine states.
eStart = 0
eError = 1
eItsMe = 2

# Confidence level above which detection can stop early.
SHORTCUT_THRESHOLD = 0.95
|
def check_brackets(string):
    """Return True iff every bracket in *string* is balanced and properly
    nested.  Non-bracket characters are ignored."""
    closer_for = {'(': ')', '{': '}', '[': ']'}
    expected = []
    for ch in string:
        if ch in closer_for:
            # Opener: remember which closer must come next.
            expected.append(closer_for[ch])
        elif ch in (')', '}', ']'):
            # Closer: it must match the most recent unmatched opener.
            if not expected or expected.pop() != ch:
                return False
    # Balanced only if no openers remain unmatched.
    return not expected
|
victorpoluceno/shortener_frontend | refs/heads/master | rest_api/views.py | 6027 | # Create your views here.
|
endlessm/chromium-browser | refs/heads/master | third_party/skia/infra/bots/gen_compile_isolate.py | 4 | #!/usr/bin/env python
#
# Copyright 2019 Google LLC
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import difflib
import os
import re
import subprocess
import sys
# Any files in Git which match these patterns will be included, either directly
# or indirectly via a parent dir.
# These are applied with re.match() to every path reported by `git ls-files`.
PATH_PATTERNS = [
  r'.*\.c$',
  r'.*\.cc$',
  r'.*\.cpp$',
  r'.*\.gn$',
  r'.*\.gni$',
  r'.*\.h$',
  r'.*\.mm$',
  r'.*\.storyboard$',
]

# These paths are always added to the inclusion list. Note that they may not
# appear in the isolate if they are included indirectly via a parent dir.
EXPLICIT_PATHS = [
  '../.gclient',
  '.clang-format',
  '.clang-tidy',
  'bin/fetch-clang-format',
  'bin/fetch-gn',
  'buildtools',
  'infra/bots/assets/android_ndk_darwin/VERSION',
  'infra/bots/assets/android_ndk_linux/VERSION',
  'infra/bots/assets/android_ndk_windows/VERSION',
  'infra/bots/assets/cast_toolchain/VERSION',
  'infra/bots/assets/clang_linux/VERSION',
  'infra/bots/assets/clang_win/VERSION',
  'infra/canvaskit',
  'infra/pathkit',
  'resources',
  'third_party/externals',
]
# If a parent path contains more than this many immediate child paths (ie. files
# and dirs which are directly inside it as opposed to indirect descendants), we
# will include the parent in the isolate file instead of the children. This
# results in a simpler isolate file which should need to be changed less often.
COMBINE_PATHS_THRESHOLD = 3

# Template for the isolate file content.
ISOLATE_TMPL = '''{
  'includes': [
    'run_recipe.isolate',
  ],
  'variables': {
    'files': [
%s
    ],
  },
}
'''

# Absolute path to the infra/bots dir.
INFRABOTS_DIR = os.path.realpath(os.path.dirname(os.path.abspath(__file__)))

# Absolute path to the compile.isolate file.
ISOLATE_FILE = os.path.join(INFRABOTS_DIR, 'compile.isolate')


def all_paths():
  """Return all paths which are checked in to git.

  Paths are relative to the repo root, as printed by `git ls-files`.
  """
  repo_root = os.path.abspath(os.path.join(INFRABOTS_DIR, os.pardir, os.pardir))
  output = subprocess.check_output(['git', 'ls-files'], cwd=repo_root).rstrip()
  return output.splitlines()


def get_relevant_paths():
  """Return all checked-in paths in PATH_PATTERNS or EXPLICIT_PATHS."""
  paths = []
  for f in all_paths():
    for regexp in PATH_PATTERNS:
      if re.match(regexp, f):
        paths.append(f)
        break  # a path is added at most once, even if several patterns match

  # Explicit paths are always included, whether or not they matched.
  paths.extend(EXPLICIT_PATHS)
  return paths


class Tree(object):
  """Tree helps with deduplicating and collapsing paths."""

  class Node(object):
    """Node represents an individual node in a Tree."""
    def __init__(self, name):
      self._children = {}
      self._name = name
      self._is_leaf = False

    @property
    def is_root(self):
      """Return True iff this is the root node."""
      return self._name is None

    def add(self, entry):
      """Add the given entry (given as a list of strings) to the Node."""
      # Remove the first element if we're not the root node.
      if not self.is_root:
        if entry[0] != self._name:
          raise ValueError('Cannot add a non-matching entry to a Node!')
        entry = entry[1:]

      # If the entry is now empty, this node is a leaf.
      if not entry:
        self._is_leaf = True
        return

      # Add a child node.
      if not self._is_leaf:
        child = self._children.get(entry[0])
        if not child:
          child = Tree.Node(entry[0])
          self._children[entry[0]] = child
        child.add(entry)

        # If we have more than COMBINE_PATHS_THRESHOLD immediate children,
        # combine them into this node.
        #
        # NOTE: uses .values() instead of the Python-2-only .itervalues();
        # behavior is identical under Python 2 and this also runs under
        # Python 3.
        immediate_children = 0
        for child in self._children.values():
          if child._is_leaf:
            immediate_children += 1
        if not self.is_root and immediate_children >= COMBINE_PATHS_THRESHOLD:
          self._is_leaf = True
          self._children = {}

    def entries(self):
      """Return the entries represented by this node and its children.

      Will not return children in the following cases:
        - This Node is a leaf, ie. it represents an entry which was explicitly
          inserted into the Tree, as opposed to only part of a path to other
          entries.
        - This Node has immediate children exceeding COMBINE_PATHS_THRESHOLD
          and thus has been upgraded to a leaf node.
      """
      if self._is_leaf:
        return [self._name]
      rv = []
      for child in self._children.values():  # .values() for py2+py3, see add()
        for entry in child.entries():
          if not self.is_root:
            entry = self._name + '/' + entry
          rv.append(entry)
      return rv

  def __init__(self):
    self._root = Tree.Node(None)

  def add(self, entry):
    """Add the given '/'-separated path string to the tree."""
    split = entry.split('/')
    if split[-1] == '':
      # Strip a trailing slash so 'dir/' and 'dir' denote the same entry.
      split = split[:-1]
    self._root.add(split)

  def entries(self):
    """Return the list of entries in the tree.

    Entries will be de-duplicated as follows:
      - Any entry which is a sub-path of another entry will not be returned.
      - Any entry which was not explicitly inserted but has children exceeding
        the COMBINE_PATHS_THRESHOLD will be returned while its children will
        not be returned.
    """
    return self._root.entries()
def relpath(repo_path):
  """Return a relative path to the given path within the repo.

  The path is relative to the infra/bots dir, where the compile.isolate file
  lives.
  """
  # Walk up from infra/bots to the repo root, then collapse the components
  # that lead back down into infra/ and infra/bots/.
  rel = '../../' + repo_path
  rel = rel.replace('../../infra/', '../')
  rel = rel.replace('../bots/', '')
  return rel
def get_isolate_content(paths):
  """Construct the new content of the isolate file based on the given paths."""
  # One quoted, comma-terminated entry per path, sorted for stable output.
  entries = sorted(' \'%s\',' % relpath(p) for p in paths)
  return ISOLATE_TMPL % '\n'.join(entries)
def main():
  """Regenerate the compile.isolate file, or verify that it hasn't changed.

  With no arguments, rewrites ISOLATE_FILE in place.  With the single
  argument "test", diffs the generated content against the existing file and
  exits non-zero if they differ.
  """
  testing = False
  if len(sys.argv) == 2 and sys.argv[1] == 'test':
    testing = True
  elif len(sys.argv) != 1:
    print >> sys.stderr, 'Usage: %s [test]' % sys.argv[0]
    sys.exit(1)
  # Build the deduplicated/collapsed path tree and render the isolate content.
  tree = Tree()
  for p in get_relevant_paths():
    tree.add(p)
  content = get_isolate_content(tree.entries())
  if testing:
    # Verify mode: show a context diff and a hint on how to regenerate.
    with open(ISOLATE_FILE, 'rb') as f:
      expect_content = f.read()
    if content != expect_content:
      print >> sys.stderr, 'Found diff in %s:' % ISOLATE_FILE
      a = expect_content.splitlines()
      b = content.splitlines()
      diff = difflib.context_diff(a, b, lineterm='')
      for line in diff:
        sys.stderr.write(line + '\n')
      print >> sys.stderr, 'You may need to run:\n\n\tpython %s' % sys.argv[0]
      sys.exit(1)
  else:
    with open(ISOLATE_FILE, 'wb') as f:
      f.write(content)
|
googleinterns/learnbase | refs/heads/master | learnbase/src/main/webapp/WEB-INF/Lib/re.py | 153 | #
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9].
\D Matches any non-digit character; equivalent to the set [^0-9].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v].
\S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module takes flags as optional parameters:
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE Make \w, \W, \b, \B, dependent on the Unicode locale.
This module also defines an exception 'error'.
"""
import sys
import sre_compile
import sre_parse
# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
    "compile", "purge", "template", "escape", "I", "L", "M", "S", "X",
    "U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
    "UNICODE", "error" ]

__version__ = "2.2.1"

# flags
# Each flag has a one-letter alias; the values come straight from
# sre_compile, so they can be OR'ed together and passed to compile().
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments

# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation

# sre exception
# Raised for malformed patterns and compilation failures.
error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
    """Apply the pattern at the start of the string; return a match
    object, or None if no match was found."""
    compiled = _compile(pattern, flags)
    return compiled.match(string)
def search(pattern, string, flags=0):
    """Scan through string looking for a match to the pattern; return a
    match object, or None if no match was found."""
    compiled = _compile(pattern, flags)
    return compiled.search(string)
def sub(pattern, repl, string, count=0, flags=0):
    """Return the string obtained by replacing the leftmost
    non-overlapping occurrences of the pattern in string by the
    replacement repl.  repl can be either a string or a callable;
    if a string, backslash escapes in it are processed; if a
    callable, it's passed the match object and must return the
    replacement string to be used."""
    compiled = _compile(pattern, flags)
    return compiled.sub(repl, string, count)
def subn(pattern, repl, string, count=0, flags=0):
    """Return a 2-tuple (new_string, number).

    new_string is the string obtained by replacing the leftmost
    non-overlapping occurrences of the pattern in the source string by
    the replacement repl; number is how many substitutions were made.
    repl can be either a string or a callable; if a string, backslash
    escapes in it are processed; if a callable, it's passed the match
    object and must return the replacement string to be used."""
    compiled = _compile(pattern, flags)
    return compiled.subn(repl, string, count)
def split(pattern, string, maxsplit=0, flags=0):
    """Split the source string by the occurrences of the pattern,
    returning a list containing the resulting substrings."""
    compiled = _compile(pattern, flags)
    return compiled.split(string, maxsplit)
def findall(pattern, string, flags=0):
    """Return a list of all non-overlapping matches in the string.

    If one or more groups are present in the pattern, return a list of
    groups; this will be a list of tuples if the pattern has more than
    one group.  Empty matches are included in the result."""
    compiled = _compile(pattern, flags)
    return compiled.findall(string)
# finditer() requires interpreter support added in Python 2.2
# (0x02020000 == 2.2.0); only define and export it when available.
if sys.hexversion >= 0x02020000:
    __all__.append("finditer")
    def finditer(pattern, string, flags=0):
        """Return an iterator over all non-overlapping matches in the
        string.  For each match, the iterator returns a match object.
        Empty matches are included in the result."""
        return _compile(pattern, flags).finditer(string)
def compile(pattern, flags=0):
    "Compile a regular expression pattern, returning a pattern object."
    # Thin public wrapper over the caching _compile() helper.
    return _compile(pattern, flags)
def purge():
    "Clear the regular expression cache"
    # Drop both the compiled-pattern cache and the replacement-template cache.
    for cache in (_cache, _cache_repl):
        cache.clear()
def template(pattern, flags=0):
    "Compile a template pattern, returning a pattern object"
    # A template pattern is an ordinary pattern compiled with the
    # (experimental) TEMPLATE flag OR'ed in, which disables backtracking.
    return _compile(pattern, T | flags)
# Characters that never need escaping in a pattern.
_alphanum = frozenset(
    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")

def escape(pattern):
    "Escape all non-alphanumeric characters in pattern."
    escaped = []
    for c in pattern:
        if c in _alphanum:
            escaped.append(c)
        elif c == "\000":
            # A NUL byte cannot simply be backslash-prefixed; spell it out.
            escaped.append("\\000")
        else:
            escaped.append("\\" + c)
    # Joining on pattern[:0] preserves the input's string type.
    return pattern[:0].join(escaped)
# --------------------------------------------------------------------
# internals
# Pattern and replacement-template caches.  Both are cleared wholesale once
# they reach _MAXCACHE entries — there is no LRU bookkeeping.
_cache = {}
_cache_repl = {}

# Concrete type of a compiled pattern object, for isinstance checks below.
_pattern_type = type(sre_compile.compile("", 0))

_MAXCACHE = 100

def _compile(*key):
    # internal: compile pattern
    # The cache key includes the pattern's type, so str and unicode
    # patterns with equal text get distinct cache entries.
    cachekey = (type(key[0]),) + key
    p = _cache.get(cachekey)
    if p is not None:
        return p
    pattern, flags = key
    if isinstance(pattern, _pattern_type):
        # Already compiled: pass through, but flags cannot be re-applied.
        if flags:
            raise ValueError('Cannot process flags argument with a compiled pattern')
        return pattern
    if not sre_compile.isstring(pattern):
        raise TypeError, "first argument must be string or compiled pattern"
    try:
        p = sre_compile.compile(pattern, flags)
    except error, v:
        raise error, v # invalid expression
    if len(_cache) >= _MAXCACHE:
        # Cache full: drop everything rather than track usage.
        _cache.clear()
    _cache[cachekey] = p
    return p
def _compile_repl(*key):
    # internal: compile replacement pattern
    # key is (repl, pattern); the parsed template is cached per pair.
    p = _cache_repl.get(key)
    if p is not None:
        return p
    repl, pattern = key
    try:
        p = sre_parse.parse_template(repl, pattern)
    except error, v:
        raise error, v # invalid expression
    if len(_cache_repl) >= _MAXCACHE:
        # Cache full: drop everything rather than track usage.
        _cache_repl.clear()
    _cache_repl[key] = p
    return p
def _expand(pattern, match, template):
    # internal: match.expand implementation hook
    # Parse the replacement template once, then expand it for this match.
    parsed = sre_parse.parse_template(template, pattern)
    return sre_parse.expand_template(parsed, match)
def _subx(pattern, template):
    # internal: pattern.sub/subn implementation helper
    compiled = _compile_repl(template, pattern)
    groups, literals = compiled
    if not groups and len(literals) == 1:
        # No group references: the replacement is a plain literal string.
        return literals[0]
    def filter(match, template=compiled):
        return sre_parse.expand_template(template, match)
    return filter
# register myself for pickling
# A compiled pattern pickles as a call to _compile(pattern, flags), so
# unpickling re-compiles (and re-caches) the pattern.
import copy_reg
def _pickle(p):
    return _compile, (p.pattern, p.flags)
copy_reg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
    """Experimental lexical scanner.

    lexicon is a list of (phrase, action) pairs; scan() tokenises a string
    by repeatedly matching the combined pattern and applying the action of
    whichever phrase matched.
    """
    def __init__(self, lexicon, flags=0):
        from sre_constants import BRANCH, SUBPATTERN
        self.lexicon = lexicon
        # combine phrases into a compound pattern
        p = []
        s = sre_parse.Pattern()
        s.flags = flags
        for phrase, action in lexicon:
            # Each phrase gets its own numbered group, so m.lastindex
            # identifies which phrase matched.
            p.append(sre_parse.SubPattern(s, [
                (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
                ]))
        s.groups = len(p)+1
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)
    def scan(self, string):
        """Tokenise *string*; return (tokens, unscanned_remainder)."""
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while 1:
            m = match()
            if not m:
                break
            j = m.end()
            if i == j:
                # Zero-width match: stop rather than loop forever.
                break
            action = self.lexicon[m.lastindex-1][1]
            if hasattr(action, '__call__'):
                # Callable action: pass the scanner and the matched text.
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        return result, string[i:]
|
jlopex/kernel_linaro_snowball | refs/heads/mesh | Documentation/target/tcm_mod_builder.py | 4981 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""        # output directory for the generated module
fabric_mod_port = ""       # target-port member name ("lport" or "tport")
fabric_mod_init_port = ""  # initiator-port member name ("nport" or "iport")
def tcm_mod_err(msg):
	# Print the error and abort the generator with a non-zero exit code.
	print msg
	sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
	# Returns 1 when the directory already exists (treated as success).
	if os.path.isdir(fabric_mod_dir_var) == True:
		return 1
	print "Creating fabric_mod_dir: " + fabric_mod_dir_var
	ret = os.mkdir(fabric_mod_dir_var)
	# NOTE(review): os.mkdir() returns None and raises OSError on failure,
	# so this error branch is effectively dead code.
	if ret:
		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
	return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_base.h for a Fibre Channel fabric.

	Writes the nacl/tpg/lport struct definitions and sets the module-level
	fabric_mod_port ("lport") / fabric_mod_init_port ("nport") names used
	by the other generator functions.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
	buf += "	u64 nport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Initiator Nport */\n"
	buf += "	char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* FC lport target portal group tag for TCM */\n"
	buf += "	u16 lport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_lport */\n"
	buf += "	struct " + fabric_mod_name + "_lport *lport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_lport {\n"
	buf += "	/* SCSI protocol the lport is providing */\n"
	buf += "	u8 lport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for FC Target Lport */\n"
	buf += "	u64 lport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Target Lport */\n"
	buf += "	char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_lport() */\n"
	buf += "	struct se_wwn lport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	# NOTE(review): file.write() returns None on Python 2, so this check
	# never fires; write errors surface as IOError instead.
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	fabric_mod_port = "lport"
	fabric_mod_init_port = "nport"
	return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_base.h for a SAS fabric.

	Writes the nacl/tpg/tport struct definitions and sets the module-level
	fabric_mod_port ("tport") / fabric_mod_init_port ("iport") names used
	by the other generator functions.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Initiator port */\n"
	buf += "	u64 iport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for Sas Initiator port */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* SAS port target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
	buf += "	u64 tport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	# NOTE(review): file.write() returns None on Python 2; check never fires.
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_base.h for an iSCSI fabric.

	Writes the nacl/tpg/tport struct definitions (IQN-based names, no
	binary WWPNs) and sets the module-level fabric_mod_port ("tport") /
	fabric_mod_init_port ("iport") names used by the other generators.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* ASCII formatted InitiatorName */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* iSCSI target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* ASCII formatted TargetName for IQN */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	# NOTE(review): file.write() returns None on Python 2; check never fires.
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
	# Dispatch to the protocol-specific _base.h generator; any other
	# proto_ident is a fatal usage error.
	if proto_ident == "FC":
		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "SAS":
		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "iSCSI":
		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
	else:
		print "Unsupported proto_ident: " + proto_ident
		sys.exit(1)
	return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
    """Populate the global fabric_ops list from the TCM fabric header.

    Scans <tcm_dir>/include/target/target_core_fabric.h line by line and
    appends every function-pointer member line (one containing '(*') to
    the module-level fabric_ops list, which tcm_mod_dump_fabric_ops()
    later uses to decide which stub functions to generate.
    """
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"

    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    # process_fo == 0 means we have not yet started collecting members.
    process_fo = 0;

    p = open(fabric_ops_api, 'r')

    line = p.readline()
    while line:
        # Phase 1: skip the line holding the struct header itself.
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue

        # Phase 2 (first member line after the header): switch to
        # collection mode and process this line immediately.
        if process_fo == 0:
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue

            fabric_ops.append(line.rstrip())
            continue

        # Phase 3: steady-state collection of the remaining members.
        # NOTE(review): there is no check for the closing '};', so the
        # scan continues to end-of-file — TODO confirm this is intended
        # (it works because later matching is done per-member name).
        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue

        fabric_ops.append(line.rstrip())

    p.close()
    return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <mod>_fabric.c (buf) and <mod>_fabric.h (bufi) stubs.

    For each function-pointer line previously collected into the global
    fabric_ops list by tcm_mod_scan_fabric_ops(), emits a no-op C stub
    definition into the .c buffer and a matching prototype into the .h
    buffer.  proto_ident ("FC", "SAS" or "iSCSI") selects which
    transport-specific helper calls are emitted for the protocol-ID and
    transport-ID callbacks.
    """
    buf = ""
    bufi = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
    print "Writing file: " + f

    p = open(f, 'w')
    if not p:
        # NOTE(review): dead check — open() raises IOError on failure
        # rather than returning a falsy value.
        tcm_mod_err("Unable to open file: " + f)

    fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
    print "Writing file: " + fi

    pi = open(fi, 'w')
    if not pi:
        # NOTE(review): dead check, same reason as above.
        tcm_mod_err("Unable to open file: " + fi)

    # Fixed preamble of the generated .c file: kernel, SCSI and TCM headers.
    buf = "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/list.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi.h>\n"
    buf += "#include <scsi/scsi_host.h>\n"
    buf += "#include <scsi/scsi_device.h>\n"
    buf += "#include <scsi/scsi_cmnd.h>\n"
    buf += "#include <scsi/libfc.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_configfs.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"

    # Unconditional helpers used by the configfs ops table.
    buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 1;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"

    buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 0;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"

    # Walk the scanned fabric_ops member lines; each re.search() below
    # matches on the member's name and emits the corresponding stub.
    total_fabric_ops = len(fabric_ops)
    i = 0

    while i < total_fabric_ops:
        fo = fabric_ops[i]
        i += 1
        # print "fabric_ops: " + fo

        if re.search('get_fabric_name', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
            buf += "{\n"
            buf += " return \"" + fabric_mod_name[4:] + "\";\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
            continue

        # Protocol-dependent stub: dispatch on the port's proto_id.
        if re.search('get_fabric_proto_ident', fo):
            buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " u8 proto_id;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"

            buf += " }\n\n"
            buf += " return proto_id;\n"
            buf += "}\n\n"
            bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"

        if re.search('get_wwn', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
            buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"

        if re.search('get_tag', fo):
            buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"

        if re.search('get_default_depth', fo):
            buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"

        if re.search('get_pr_transport_id\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl,\n"
            buf += " struct t10_pr_registration *pr_reg,\n"
            buf += " int *format_code,\n"
            buf += " unsigned char *buf)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " int ret = 0;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"

            buf += " }\n\n"
            buf += " return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += " int *, unsigned char *);\n"

        if re.search('get_pr_transport_id_len\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl,\n"
            buf += " struct t10_pr_registration *pr_reg,\n"
            buf += " int *format_code)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " int ret = 0;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"

            buf += " }\n\n"
            buf += " return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += " int *);\n"

        # NOTE(review): unlike the two cases above, the generated switch
        # here emits no 'break;' — harmless in the generated C because
        # each branch is also the default and falls to the switch end.
        if re.search('parse_pr_out_transport_id\)\(', fo):
            buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " const char *buf,\n"
            buf += " u32 *out_tid_len,\n"
            buf += " char **port_nexus_ptr)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " char *tid = NULL;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"

            buf += " }\n\n"
            buf += " return tid;\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
            bufi += " const char *, u32 *, char **);\n"

        if re.search('alloc_fabric_acl\)\(', fo):
            buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
            buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
            buf += " if (!nacl) {\n"
            buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
            buf += " return NULL;\n"
            buf += " }\n\n"
            buf += " return &nacl->se_node_acl;\n"
            buf += "}\n\n"
            bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"

        if re.search('release_fabric_acl\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
            buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
            buf += " kfree(nacl);\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *);\n"

        if re.search('tpg_get_inst_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"

        # The remaining members all get trivial no-op / zero-returning
        # stubs with matching prototypes.
        if re.search('\*release_cmd\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"

        if re.search('shutdown_session\)\(', fo):
            buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"

        if re.search('close_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"

        if re.search('stop_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"

        if re.search('fall_back_to_erl0\)\(', fo):
            buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"

        if re.search('sess_logged_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"

        if re.search('sess_get_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"

        if re.search('write_pending\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"

        if re.search('write_pending_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"

        if re.search('set_default_node_attributes\)\(', fo):
            buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"

        if re.search('get_task_tag\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"

        if re.search('get_cmd_state\)\(', fo):
            buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"

        if re.search('queue_data_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"

        if re.search('queue_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"

        if re.search('queue_tm_rsp\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"

        if re.search('get_fabric_sense_len\)\(', fo):
            buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"

        if re.search('set_fabric_sense_len\)\(', fo):
            buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"

        if re.search('is_state_remove\)\(', fo):
            buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"

    ret = p.write(buf)
    if ret:
        # NOTE(review): dead check — in Python 2 file.write() returns
        # None, so this branch never fires; write errors raise IOError.
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    ret = pi.write(bufi)
    if ret:
        # NOTE(review): dead check, same reason as above.
        tcm_mod_err("Unable to write fi: " + fi)

    pi.close()
    return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
    """Write the kbuild Makefile for the generated fabric module.

    Creates <fabric_mod_dir_var>/Makefile listing the module's object
    files and the obj-$(CONFIG_<NAME>) rule.

    Fixes over the original:
      * dropped the ``if not p:`` check — open() raises IOError on
        failure instead of returning a falsy value, so it was dead code;
      * dropped the ``if ret:`` check — Python 2 file.write() returns
        None (never truthy), and under Python 3 write() returns the
        character count, which would have raised a spurious error;
        a genuine write failure raises IOError either way;
      * single-argument print written in parenthesized form, which
        behaves identically on Python 2 and is also valid Python 3.
    """
    f = fabric_mod_dir_var + "/Makefile"
    print("Writing file: " + f)

    p = open(f, 'w')

    buf = fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
    buf += "   " + fabric_mod_name + "_configfs.o\n"
    buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"

    p.write(buf)
    p.close()
    return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
    """Write the Kconfig entry for the generated fabric module.

    Creates <fabric_mod_dir_var>/Kconfig with a tristate option named
    after the module, depending on TARGET_CORE and CONFIGFS_FS.

    Fixes over the original:
      * dropped the dead ``if not p:`` check (open() raises on failure);
      * dropped the dead/incorrect ``if ret:`` check (Python 2 write()
        returns None; write failures raise IOError);
      * single-argument print written in parenthesized form, identical
        behavior on Python 2 and valid Python 3.
    """
    f = fabric_mod_dir_var + "/Kconfig"
    print("Writing file: " + f)

    p = open(f, 'w')

    buf = "config " + fabric_mod_name.upper() + "\n"
    buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
    buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
    buf += " default n\n"
    buf += " ---help---\n"
    buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"

    p.write(buf)
    p.close()
    return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append this module's obj-$(CONFIG_...) rule to drivers/target/Makefile."""
    makefile_path = tcm_dir + "/drivers/target/Makefile"
    rule = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    with open(makefile_path, 'a') as makefile:
        makefile.write(rule)
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a source line for this module to drivers/target/Kconfig."""
    kconfig_path = tcm_dir + "/drivers/target/Kconfig"
    source_line = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    with open(kconfig_path, 'a') as kconfig:
        kconfig.write(source_line)
    return
def main(modname, proto_ident):
    """Drive generation of a TCM fabric module skeleton.

    Validates proto_ident ("FC", "SAS" or "iSCSI"), creates the module
    directory under drivers/target/, generates all skeleton sources and
    build files, then interactively offers to wire the module into the
    kernel's Makefile and Kconfig.

    Fixes over the original:
      * prompt typo — the missing space produced e.g.
        "add tcm_footo drivers/target/Makefile";
      * no longer shadows the builtin ``input`` (renamed to ``answer``);
      * single-argument print written in parenthesized form (identical
        behavior on Python 2, also valid Python 3).
    """
    # proto_ident = "FC"
    # proto_ident = "SAS"
    # proto_ident = "iSCSI"

    # The script is expected to run from within the kernel tree
    # (two levels below the tree root) — TODO confirm invocation dir.
    tcm_dir = os.getcwd()
    tcm_dir += "/../../"
    print("tcm_dir: " + tcm_dir)

    fabric_mod_name = modname
    fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
    print("Set fabric_mod_name: " + fabric_mod_name)
    print("Set fabric_mod_dir: " + fabric_mod_dir)
    print("Using proto_ident: " + proto_ident)

    if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)

    ret = tcm_mod_create_module_subdir(fabric_mod_dir)
    if ret:
        print("tcm_mod_create_module_subdir() failed because module already exists!")
        sys.exit(1)

    tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_scan_fabric_ops(tcm_dir)
    tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)

    answer = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
    if answer == "yes" or answer == "y":
        tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)

    answer = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
    if answer == "yes" or answer == "y":
        tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
    return
# Command-line interface: both -m/--modulename and -p/--protoident are
# required; missing options print usage and exit.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

# Enforce that every mandatory option was supplied (optparse itself has
# no notion of required options).
mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    # modname is coerced to str; protoident is validated inside main().
    main(str(opts.modname), opts.protoident)
|
PolicyStat/selenium-old | refs/heads/master | py/test/selenium/webdriver/firefox/test_ff_executing_javascript_test.py | 4 | #!/usr/bin/python
#
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium import webdriver
from selenium.test.selenium.webdriver.common import executing_javascript_test
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
def setup_module(module):
    """Start one shared web server and Firefox driver for this module's tests."""
    server = SimpleWebServer()
    server.start()
    FirefoxExecutingJavaScriptTests.webserver = server
    FirefoxExecutingJavaScriptTests.driver = webdriver.Firefox()
class FirefoxExecutingJavaScriptTests(executing_javascript_test.ExecutingJavaScriptTests):
    """Run the shared ExecutingJavaScriptTests suite against Firefox.

    All test methods are inherited; the `driver` and `webserver` class
    attributes are injected by setup_module() before any test runs.
    """
    pass
def teardown_module(module):
    """Shut down the browser and web server started in setup_module()."""
    suite = FirefoxExecutingJavaScriptTests
    suite.driver.quit()
    suite.webserver.stop()
|
bdryburgh/pynet_testX | refs/heads/master | str_ex2.py | 1 | #!/usr/bin/env python
from __future__ import print_function

# Prompt for an IP address. raw_input only exists on Python 2; under
# Python 3 it raises NameError and we fall back to input().
try:
    # PY2
    ip_addr = raw_input("Please enter IP address: ")
except NameError:
    # PY3
    ip_addr = input("Please enter IP address: ")

ip_addr = ip_addr.split(".")

# Print the four octets in left-aligned 20-character columns between
# ruler lines (assumes a dotted-quad entry with exactly four fields).
ruler = "=" * 70
print()
print(ruler)
print("{:<20} {:<20} {:<20} {:<20}".format(*ip_addr))
print(ruler)
print()
|
Korkki/django | refs/heads/master | tests/sitemaps_tests/base.py | 380 | from django.apps import apps
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.test import TestCase, modify_settings, override_settings
from .models import I18nTestModel, TestModel
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sitemaps'})
@override_settings(ROOT_URLCONF='sitemaps_tests.urls.http')
class SitemapTestsBase(TestCase):
    """Common base class for the sitemaps test suites.

    Provides the protocol/domain used to build expected sitemap URLs and
    creates the model fixtures each test renders into a sitemap.
    """
    protocol = 'http'
    # When django.contrib.sites is not installed, Django falls back to the
    # request host, which is 'testserver' under the test client.
    sites_installed = apps.is_installed('django.contrib.sites')
    domain = 'example.com' if sites_installed else 'testserver'

    def setUp(self):
        # Base URL every expected sitemap entry is built from.
        self.base_url = '%s://%s' % (self.protocol, self.domain)
        cache.clear()
        # Create an object for sitemap content.
        TestModel.objects.create(name='Test Object')
        self.i18n_model = I18nTestModel.objects.create(name='Test Object')

    @classmethod
    def setUpClass(cls):
        super(SitemapTestsBase, cls).setUpClass()
        # This cleanup is necessary because contrib.sites cache
        # makes tests interfere with each other, see #11505
        Site.objects.clear_cache()
|
eeshangarg/oh-mainline | refs/heads/master | vendor/packages/sphinx/sphinx-quickstart.py | 15 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Sphinx - Python documentation toolchain
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
if __name__ == '__main__':
    # Thin launcher: delegate to sphinx.quickstart.main and propagate
    # its return value as the process exit code.
    from sphinx.quickstart import main
    sys.exit(main(sys.argv))
|
henrykironde/scikit-learn | refs/heads/master | sklearn/externals/joblib/_multiprocessing_helpers.py | 326 | """Helper module to factorize the conditional multiprocessing import logic
We use a distinct module to simplify import statements and avoid introducing
circular dependencies (for instance for the assert_spawning name).
"""
import os
import warnings
# Obtain possible configuration from the environment, assuming 1 (on)
# by default, upon 0 set to None. Should instructively fail if some non
# 0/1 value is set.
mp = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
if mp:
    # 1st stage: the module must be importable at all; `mp` becomes the
    # multiprocessing module object on success, None otherwise.
    try:
        import multiprocessing as mp
        import multiprocessing.pool
    except ImportError:
        mp = None

# 2nd stage: validate that locking is available on the system and
# issue a warning if not
if mp is not None:
    try:
        # Creating a Semaphore exercises the platform's named-semaphore
        # support, which can be missing (e.g. restricted /dev/shm).
        _sem = mp.Semaphore()
        del _sem  # cleanup
    except (ImportError, OSError) as e:
        mp = None
        warnings.warn('%s. joblib will operate in serial mode' % (e,))

# 3rd stage: backward compat for the assert_spawning helper
if mp is not None:
    try:
        # Python 3.4+
        from multiprocessing.context import assert_spawning
    except ImportError:
        from multiprocessing.forking import assert_spawning
else:
    assert_spawning = None
|
nirmeshk/oh-mainline | refs/heads/master | mysite/missions/shell/views.py | 15 | # This file is part of OpenHatch.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mysite.missions.base import Mission, MissionBaseView
from mysite.missions.base.views import (
MissionPageState,
view,
login_required
)
from mysite.missions.base.view_helpers import (
mission_completed,
set_mission_completed,
)
@login_required
def command_cd_submit(request):
    """Grade the 'cd' quiz answer POSTed by the user and re-render the page.

    Option '3' is the correct answer: it marks the command_cd mission
    step complete; any other choice stores an error/hint message.
    Non-POST requests just re-render with the defaults.
    """
    # Initialize data array and some default values.
    data = {}
    data['command_cd_success'] = False
    data['command_cd_error_message'] = ''

    if request.method == 'POST':
        selected = request.POST.get('option')
        if selected == '3':
            data['command_cd_success'] = True
            set_mission_completed(
                request.user.get_profile(), 'command_cd')
        else:
            # NOTE(review): the backslash line-continuation inside the
            # string literal embeds the next line's leading whitespace
            # in the user-visible message — confirm that is intended.
            data['command_cd_error_message'] = 'Oops! wrong answer \n Hint: \
                see the second point'
    return command_cd(request, data)
@login_required
def command_ls_submit(request):
    """Grade the 'ls' quiz answer POSTed by the user and re-render the page.

    Option '1' is the correct answer: it marks the command_ls mission
    step complete; any other choice stores an error/hint message.
    """
    # Initialize data array and some default values.
    data = {}
    data['command_ls_success'] = False
    data['command_ls_error_message'] = ''

    if request.method == 'POST':
        selected = request.POST.get('option')
        if selected == '1':
            data['command_ls_success'] = True
            set_mission_completed(
                request.user.get_profile(), 'command_ls')
        else:
            # NOTE(review): the backslash continuation embeds the next
            # line's leading whitespace in the user-visible message.
            data['command_ls_error_message'] = 'Oops! wrong answer \n Hint: \
                type "ls --help" to view list of options'
    return command_ls(request, data)
@login_required
def command_mkdir_rm_submit(request):
    """Grade the 'mkdir/rm' quiz answer and re-render the page.

    Option '3' is the correct answer: it marks the command_mkdir_rm
    mission step complete; any other choice stores an error/hint message.
    """
    # Initialize data array and some default values.
    data = {}
    data['command_mkdir_rm_success'] = False
    data['command_mkdir_rm_error_message'] = ''

    if request.method == 'POST':
        selected = request.POST.get('option')
        if selected == '3':
            data['command_mkdir_rm_success'] = True
            set_mission_completed(
                request.user.get_profile(), 'command_mkdir_rm')
        else:
            # NOTE(review): the backslash continuations embed each next
            # line's leading whitespace in the user-visible message.
            data['command_mkdir_rm_error_message'] = 'Oops! wrong answer \n \
                Hint: here, "music" directory is removed and "videos" directory \
                is created'
    return command_mkdir_rm(request, data)
@login_required
def command_cp_mv_submit(request):
    """Grade the 'cp/mv' quiz answer and re-render the page.

    Option '4' is the correct answer: it marks the command_cp_mv mission
    step complete; any other choice stores an error/hint message.
    """
    # Initialize data array and some default values.
    data = {}
    data['command_cp_mv_success'] = False
    data['command_cp_mv_error_message'] = ''

    if request.method == 'POST':
        selected = request.POST.get('option')
        if selected == '4':
            data['command_cp_mv_success'] = True
            set_mission_completed(
                request.user.get_profile(), 'command_cp_mv')
        else:
            # NOTE(review): the backslash continuations embed each next
            # line's leading whitespace in the user-visible message.
            data['command_cp_mv_error_message'] = 'Oops! wrong answer \n \
                Hint: here, "test.txt" is renamed to "songs.txt" and then it is\
                copied to "music" directory'
    return command_cp_mv(request, data)
@view
def about(request, passed_data={}):
    """Render the shell mission's 'About' page."""
    page_state = ShellMissionPageState(request, passed_data)
    page_state.this_mission_page_short_name = 'About'
    context = page_state.as_dict_for_template_context()
    return (request, 'missions/shell/about.html', context)
@view
def command_cd(request, passed_data={}):
    """Render the 'Using cd command' mission page.

    `passed_data` carries grading results from command_cd_submit; it is
    read-only here (the mutable default is never mutated).
    """
    state = ShellMissionPageState(request, passed_data)
    state.this_mission_page_short_name = 'Using cd command'
    # Build the template context once; the old code computed it, threw
    # the result away, and computed it again for the return value.
    data = state.as_dict_for_template_context()
    return (request, 'missions/shell/cd.html', data)
@view
def command_ls(request, passed_data={}):
    """Render the 'Using ls command' mission page.

    `passed_data` carries grading results from command_ls_submit; it is
    read-only here.
    """
    state = ShellMissionPageState(request, passed_data)
    state.this_mission_page_short_name = 'Using ls command'
    # Build the context once instead of discarding it and recomputing.
    data = state.as_dict_for_template_context()
    return (request, 'missions/shell/ls.html', data)
@view
def command_mkdir_rm(request, passed_data={}):
    """Render the 'Using mkdir and rm command' mission page.

    `passed_data` carries grading results from command_mkdir_rm_submit;
    it is read-only here.
    """
    state = ShellMissionPageState(request, passed_data)
    state.this_mission_page_short_name = 'Using mkdir and rm command'
    # Build the context once instead of discarding it and recomputing.
    data = state.as_dict_for_template_context()
    return (request, 'missions/shell/mkdir_rm.html', data)
@view
def command_cp_mv(request, passed_data={}):
    """Render the 'Using cp and mv command' mission page.

    `passed_data` carries grading results from command_cp_mv_submit;
    it is read-only here.
    """
    state = ShellMissionPageState(request, passed_data)
    state.this_mission_page_short_name = 'Using cp and mv command'
    # Build the context once instead of discarding it and recomputing.
    data = state.as_dict_for_template_context()
    return (request, 'missions/shell/cp_mv.html', data)
@view
def file_and_directory(request, passed_data={}):
    """Render the page explaining the difference between files and
    directories."""
    state = ShellMissionPageState(request, passed_data)
    # Fixed: typo ("Differece") and the stray run of spaces that the old
    # backslash-continued literal leaked into the page title.
    state.this_mission_page_short_name = 'Difference between file and directory'
    return (request, 'missions/shell/structure.html',
            state.as_dict_for_template_context())
@view
def more_info(request, passed_data={}):
    """Render the 'More Information' resources page of the shell mission."""
    page_state = ShellMissionPageState(request, passed_data)
    page_state.this_mission_page_short_name = 'More Information'
    context = page_state.as_dict_for_template_context()
    return (request, 'missions/shell/resources.html', context)
# State Manager
class ShellMissionPageState(MissionPageState):
    """Template-context state shared by every shell-mission page."""

    def __init__(self, request, passed_data):
        super(ShellMissionPageState, self).__init__(
            request, passed_data, 'Using command line shell')

    def as_dict_for_template_context(self):
        """Return the base context plus a *_done flag per mission step."""
        (data, person) = self.get_base_data_dict_and_person()
        if person:
            completion_flags = {}
            for step in ('command_cd', 'command_ls',
                         'command_mkdir_rm', 'command_cp_mv'):
                completion_flags['%s_done' % step] = mission_completed(
                    person, step)
            data.update(completion_flags)
        return data
|
ekesken/istatistikciadamlazim | refs/heads/master | openid/consumer/html_parse.py | 167 | """
This module implements a VERY limited parser that finds <link> tags in
the head of HTML or XHTML documents and parses out their attributes
according to the OpenID spec. It is a liberal parser, but it requires
these things from the data in order to work:
- There must be an open <html> tag
- There must be an open <head> tag inside of the <html> tag
- Only <link>s that are found inside of the <head> tag are parsed
(this is by design)
- The parser follows the OpenID specification in resolving the
attributes of the link tags. This means that the attributes DO NOT
get resolved as they would by an XML or HTML parser. In particular,
only certain entities get replaced, and href attributes do not get
resolved relative to a base URL.
From http://openid.net/specs.bml#linkrel:
- The openid.server URL MUST be an absolute URL. OpenID consumers
MUST NOT attempt to resolve relative URLs.
- The openid.server URL MUST NOT include entities other than &,
<, >, and ".
The parser ignores SGML comments and <![CDATA[blocks]]>. Both kinds of
quoting are allowed for attributes.
The parser deals with invalid markup in these ways:
- Tag names are not case-sensitive
- The <html> tag is accepted even when it is not at the top level
- The <head> tag is accepted even when it is not a direct child of
the <html> tag, but a <html> tag must be an ancestor of the <head>
tag
- <link> tags are accepted even when they are not direct children of
the <head> tag, but a <head> tag must be an ancestor of the <link>
tag
- If there is no closing tag for an open <html> or <head> tag, the
remainder of the document is viewed as being inside of the tag. If
there is no closing tag for a <link> tag, the link tag is treated
as a short tag. Exceptions to this rule are that <html> closes
<html> and <body> or <head> closes <head>
- Attributes of the <link> tag are not required to be quoted.
- In the case of duplicated attribute names, the attribute coming
last in the tag will be the value returned.
- Any text that does not parse as an attribute within a link tag will
be ignored. (e.g. <link pumpkin rel='openid.server' /> will ignore
pumpkin)
- If there are more than one <html> or <head> tag, the parser only
looks inside of the first one.
- The contents of <script> tags are ignored entirely, except unclosed
<script> tags. Unclosed <script> tags are ignored.
- Any other invalid markup is ignored, including unclosed SGML
comments and unclosed <![CDATA[blocks.
"""
__all__ = ['parseLinkAttrs']
import re
flags = ( re.DOTALL # Match newlines with '.'
| re.IGNORECASE
| re.VERBOSE # Allow comments and whitespace in patterns
| re.UNICODE # Make \b respect Unicode word boundaries
)
# Stuff to remove before we start looking for tags
removed_re = re.compile(r'''
# Comments
<!--.*?-->
# CDATA blocks
| <!\[CDATA\[.*?\]\]>
# script blocks
| <script\b
# make sure script is not an XML namespace
(?!:)
[^>]*>.*?</script>
''', flags)
tag_expr = r'''
# Starts with the tag name at a word boundary, where the tag name is
# not a namespace
<%(tag_name)s\b(?!:)
# All of the stuff up to a ">", hopefully attributes.
(?P<attrs>[^>]*?)
(?: # Match a short tag
/>
| # Match a full tag
>
(?P<contents>.*?)
# Closed by
(?: # One of the specified close tags
</?%(closers)s\s*>
# End of the string
| \Z
)
)
'''
def tagMatcher(tag_name, *close_tags):
    """Compile a pattern matching `tag_name`, closed by itself or any of
    `close_tags`, using the module-level tag_expr template."""
    if close_tags:
        closers = '(?:%s)' % ('|'.join((tag_name,) + close_tags),)
    else:
        closers = tag_name
    # tag_expr pulls 'tag_name' and 'closers' out of locals() by name.
    return re.compile(tag_expr % locals(), flags)
# Must contain at least an open html and an open head tag
html_find = tagMatcher('html')
head_find = tagMatcher('head', 'body')
link_find = re.compile(r'<link\b(?!:)', flags)
attr_find = re.compile(r'''
# Must start with a sequence of word-characters, followed by an equals sign
(?P<attr_name>\w+)=
# Then either a quoted or unquoted attribute
(?:
# Match everything that\'s between matching quote marks
(?P<qopen>["\'])(?P<q_val>.*?)(?P=qopen)
|
# If the value is not quoted, match up to whitespace
(?P<unq_val>(?:[^\s<>/]|/(?!>))+)
)
|
(?P<end_link>[<>])
''', flags)
# Entity replacement:
# Only the four entities the OpenID spec allows are decoded.
replacements = {
    'amp': '&',
    'lt': '<',
    'gt': '>',
    'quot': '"',
}

ent_replace = re.compile(r'&(%s);' % '|'.join(replacements.keys()))

def replaceEnt(mo):
    "Replace the entities that are specified by OpenID"
    entity = mo.group(1)
    if entity in replacements:
        return replacements[entity]
    return mo.group()
def parseLinkAttrs(html):
    """Find all link tags in a string representing a HTML document and
    return a list of their attributes.
    @param html: the text to parse
    @type html: str or unicode
    @return: A list of dictionaries of attributes, one for each link tag
    @rtype: [[(type(html), type(html))]]
    """
    # Drop comments, CDATA and <script> bodies before any tag matching.
    stripped = removed_re.sub('', html)
    html_mo = html_find.search(stripped)
    # start('contents') == -1 means the <html> tag matched as a short
    # tag, i.e. it has no body to search.
    if html_mo is None or html_mo.start('contents') == -1:
        return []
    start, end = html_mo.span('contents')
    # Only the first <head> inside the first <html> is considered.
    head_mo = head_find.search(stripped, start, end)
    if head_mo is None or head_mo.start('contents') == -1:
        return []
    start, end = head_mo.span('contents')
    link_mos = link_find.finditer(stripped, head_mo.start(), head_mo.end())
    matches = []
    for link_mo in link_mos:
        # Skip past '<link' (5 characters) to start attribute scanning.
        start = link_mo.start() + 5
        link_attrs = {}
        for attr_mo in attr_find.finditer(stripped, start):
            # 'end_link' matches '<' or '>': the tag is over.
            if attr_mo.lastgroup == 'end_link':
                break
            # Either q_val or unq_val must be present, but not both
            # unq_val is a True (non-empty) value if it is present
            attr_name, q_val, unq_val = attr_mo.group(
                'attr_name', 'q_val', 'unq_val')
            # Decode only the four OpenID-sanctioned entities.
            attr_val = ent_replace.sub(replaceEnt, unq_val or q_val)
            # Duplicated attribute names: last one wins (by design).
            link_attrs[attr_name] = attr_val
        matches.append(link_attrs)
    return matches
def relMatches(rel_attr, target_rel):
    """Does this target_rel appear in the rel_str?"""
    # XXX: TESTME
    for candidate in rel_attr.strip().split():
        if candidate.lower() == target_rel:
            return 1
    return 0
def linkHasRel(link_attrs, target_rel):
    """Does this link have target_rel as a relationship?"""
    # XXX: TESTME
    rel_attr = link_attrs.get('rel')
    if not rel_attr:
        # Preserve the original short-circuit value (None or '').
        return rel_attr
    return relMatches(rel_attr, target_rel)
def findLinksRel(link_attrs_list, target_rel):
    """Filter the list of link attributes on whether it has target_rel
    as a relationship.

    Uses a list comprehension instead of `filter` + lambda: `filter`
    returns a lazy iterator on Python 3, which would break indexing in
    findFirstHref; the comprehension returns a list on both versions.
    """
    # XXX: TESTME
    return [attrs for attrs in link_attrs_list
            if linkHasRel(attrs, target_rel)]
def findFirstHref(link_attrs_list, target_rel):
    """Return the value of the href attribute for the first link tag
    in the list that has target_rel as a relationship."""
    # XXX: TESTME
    matches = findLinksRel(link_attrs_list, target_rel)
    if matches:
        return matches[0].get('href')
    return None
|
matthewelse/micropython | refs/heads/emscripten | tests/basics/int_divmod.py | 37 | # test integer floor division and modulo
# test all combination of +/-/0 cases
for num in range(-2, 3):
    for den in range(-4, 5):
        if den:
            print(num, den, num // den, num % den, divmod(num, den))

# this tests bignum modulo
big = 987654321987987987987987987987
small = 19
print(big % small)
print(big % -small)
print(-big % small)
print(-big % -small)
|
dongjoon-hyun/DIGITS | refs/heads/master | digits/model/__init__.py | 8 | # Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .images import *
from .job import ModelJob
|
Thraxis/SickRage | refs/heads/master | lib/github/GitObject.py | 74 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class GitObject(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents GitObjects as returned for example by http://developer.github.com/v3/todo
    """

    @property
    def sha(self):
        """
        :type: string
        """
        return self._sha.value

    @property
    def type(self):
        """
        :type: string
        """
        return self._type.value

    @property
    def url(self):
        """
        :type: string
        """
        return self._url.value

    def _initAttributes(self):
        # Every backing attribute starts out NotSet until the API data
        # arrives via _useAttributes.
        for attr in ("_sha", "_type", "_url"):
            setattr(self, attr, github.GithubObject.NotSet)

    def _useAttributes(self, attributes):
        # Copy each known key from the raw API payload into its backing
        # string attribute; unknown keys are ignored.
        for key in ("sha", "type", "url"):
            if key in attributes:  # pragma no branch
                setattr(self, "_" + key,
                        self._makeStringAttribute(attributes[key]))
|
mrroach/CentralServer | refs/heads/master | csrv/model/actions/break_subroutine.py | 1 | """Break a subroutine with an icebreaker."""
from csrv.model import appropriations
from csrv.model import cost
from csrv.model import errors
from csrv.model import events
from csrv.model import game_object
from csrv.model import parameters
from csrv.model.actions import action
from csrv.model.cards import card_info
class BreakSubroutine(action.Action):
    """Pay a cost to break one or more subroutines on the current ice."""

    DESCRIPTION = 'Break a subroutine'
    REQUIRED_KEYWORDS = set()

    def __init__(self, game, player, card, subroutine, credits=1, clicks=0):
        action.Action.__init__(
            self, game, player,
            cost=cost.SimpleCost(game, player, credits=credits, clicks=clicks))
        self.card = card
        # Accept either a single subroutine or a list of them.
        if isinstance(subroutine, list):
            self.subroutines = subroutine
        else:
            self.subroutines = [subroutine]
        # Icebreakers may draw on credits earmarked for icebreaker use.
        if card_info.ICEBREAKER in self.card.KEYWORDS:
            self.cost.appropriations.append(appropriations.USE_ICEBREAKERS)

    def is_usable(self):
        """Legal only against matching, not-stronger ice, when affordable
        and the targeted subroutines are not already broken."""
        required = self.REQUIRED_KEYWORDS
        if required and not (required & self.game.run.current_ice().KEYWORDS):
            return False
        if not self.cost.can_pay():
            return False
        if self.card.strength < self.game.run.current_ice().strength:
            return False
        return not self.subroutines_broken()

    def subroutines_broken(self):
        """True when any targeted subroutine is already broken."""
        return any(s.is_broken for s in self.subroutines)

    def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
        """Pay the cost (via the base class) and mark the subroutines broken."""
        action.Action.resolve(
            self,
            ignore_clicks=ignore_clicks,
            ignore_all_costs=ignore_all_costs)
        broken = ', '.join(str(s) for s in self.subroutines)
        self.card.log('The runner breaks "%s" with %s' % (broken, self.card))
        for sub in self.subroutines:
            sub.is_broken = True

    @property
    def description(self):
        """Human-readable summary, prefixed with the cost when non-zero."""
        sub_names = ','.join(str(s) for s in self.subroutines)
        message = 'Break "%s" with %s' % (sub_names, self.card)
        cost_parts = []
        click_count = self.cost.clicks()
        if click_count:
            cost_parts.append(','.join('[click]' for _ in range(click_count)))
        if self.cost.credits():
            cost_parts.append('%s [credits]' % self.cost.credits())
        if cost_parts:
            message = '%s: %s' % (','.join(cost_parts), message)
        return message
|
leag/swftools | refs/heads/master | spec/textarea3.py | 10 | from sys import *
from pdflib_py import *
import md5
import Image
import math
# Python 2 script: renders "baboon.png" as colored MD5-hex text in a PDF.
# (Integer division like `height / 6` relies on Python 2 semantics.)
img = Image.open("baboon.png")
width, height = img.size
img.load()
p = PDF_new()
PDF_open_file(p, "textarea3.pdf")
PDF_set_parameter(p, "usercoordinates", "true")
PDF_set_info(p, "Creator", "smalltext.py")
PDF_begin_page(p, width, height)
font = PDF_load_font(p, "Courier", "host", "")
# Paint a black background rectangle covering the whole page.
PDF_setrgbcolor_fill(p, 0.0, 0.0, 0.0)
PDF_moveto(p, 0, 0)
PDF_lineto(p, width, 0)
PDF_lineto(p, width, height)
PDF_lineto(p, 0, height)
PDF_lineto(p, 0, 0)
PDF_fill(p)
PDF_setfont(p, font, 4.0)
# One glyph per 6x6 pixel cell; `i` seeds a fresh hex string per row.
i = 0
for y in range(height / 6):
    # 9 MD5 hex digests concatenated -> enough characters for the row.
    text = "".join([md5.md5(str(i+j*732849)).hexdigest() for j in range(9)])
    for x in range(width / 6):
        # Sample one pixel per cell; note y is flipped (PDF origin is bottom-left).
        r,g,b = img.getpixel((x*6, height-1-y*6))
        l = math.sqrt(r*r+g*g+b*b)
        # Pure black cells are skipped (background already black).
        if not r and not g and not b:
            continue
        # Map luminance to one of a few font sizes; 444.0 is roughly the
        # maximum of sqrt(r^2+g^2+b^2) (sqrt(3)*255 ~= 441.7).
        white = (l / 444.0)*5
        PDF_setfont(p, font, 0.5+int(white)*4)
        # Quantize the normalized color to a small palette in [0.3, 1.2].
        r = 0.3 + 0.3 * int((r/l)*3)
        g = 0.3 + 0.3 * int((g/l)*3)
        b = 0.3 + 0.3 * int((b/l)*3)
        PDF_setrgbcolor_fill(p, r, g, b)
        PDF_set_text_pos(p, x*6, y*6);
        PDF_show(p, text[x])
        i = i + 1
PDF_end_page(p)
PDF_close(p)
PDF_delete(p);
|
fqxp/nete-gtk | refs/heads/master | nete/state/utils/note_list.py | 1 | from pyrsistent import freeze
def add_new(notes, note):
    """Return a frozen, title-ordered copy of `notes` with `note` added."""
    # NOTE(review): assumes `notes.append` returns the extended sequence
    # (pyrsistent-style) rather than mutating in place — confirm with callers.
    extended = notes.append(note)
    return freeze(ordered(extended))
def without(notes, title):
    """Return a frozen, ordered copy of `notes` minus entries titled `title`."""
    kept = [note for note in notes if note['title'] != title]
    return freeze(ordered(kept))
def ordered(notes):
    """Return `notes` frozen and sorted case-insensitively by title."""
    def sort_key(note):
        return note['title'].lower()
    return freeze(sorted(notes, key=sort_key))
def is_visible(note_title, filter_term):
    """True when `filter_term` is empty or occurs (case-insensitively)
    in `note_title`."""
    if not filter_term:
        return True
    return filter_term.lower() in note_title.lower()
|
f1aky/xadmin | refs/heads/django1.10 | xadmin/adminx.py | 6 | import xadmin
from models import UserSettings, Log
from xadmin.layout import *
from django.utils.translation import ugettext_lazy as _, ugettext
class UserSettingsAdmin(object):
    """xadmin options for the per-user settings model."""
    model_icon = 'fa fa-cog'  # icon shown in the admin UI
    hidden_menu = True        # keep this model out of the navigation menu
xadmin.site.register(UserSettings, UserSettingsAdmin)
class LogAdmin(object):
    """xadmin options for browsing admin action log entries."""

    def link(self, instance):
        """Render an 'Admin Object' anchor for the logged object, when it
        still exists (i.e. the action was not a delete)."""
        if (instance.content_type and instance.object_id
                and instance.action_flag != 'delete'):
            admin_url = self.get_admin_url(
                '%s_%s_change' % (instance.content_type.app_label,
                                  instance.content_type.model),
                instance.object_id)
            return "<a href='%s'>%s</a>" % (admin_url, _('Admin Object'))
        return ''
    link.short_description = ""
    link.allow_tags = True
    link.is_column = False

    list_display = ('action_time', 'user', 'ip_addr', '__str__', 'link')
    list_filter = ['user', 'action_time']
    search_fields = ['ip_addr', 'message']
    model_icon = 'fa fa-cog'
xadmin.site.register(Log, LogAdmin)
|
joberreiter/pyload | refs/heads/stable | module/plugins/crypter/RelinkUs.py | 1 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import binascii
import re
import os
from Crypto.Cipher import AES
from module.plugins.internal.Crypter import Crypter
from module.utils import save_join as fs_join
class RelinkUs(Crypter):
__name__ = "RelinkUs"
__type__ = "crypter"
__version__ = "3.14"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?relink\.us/(f/|((view|go)\.php\?id=))(?P<ID>.+)'
__config__ = [("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description__ = """Relink.us decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es"),
("AndroKev", "neureither.kevin@gmail.com")]
PREFERRED_LINK_SOURCES = ["cnl2", "dlc", "web"]
OFFLINE_TOKEN = r'<title>Tattooside'
PASSWORD_TOKEN = r'container_password.php'
PASSWORD_ERROR_ROKEN = r'You have entered an incorrect password'
PASSWORD_SUBMIT_URL = r'http://www.relink.us/container_password.php'
CAPTCHA_TOKEN = r'container_captcha.php'
CAPTCHA_ERROR_ROKEN = r'You have solved the captcha wrong'
CAPTCHA_IMG_URL = r'http://www.relink.us/core/captcha/circlecaptcha.php'
CAPTCHA_SUBMIT_URL = r'http://www.relink.us/container_captcha.php'
FILE_TITLE_REGEX = r'<th>Title</th><td>(.*)</td></tr>'
FILE_NOTITLE = r'No title'
CNL2_FORM_REGEX = r'<form id="cnl_form-(.*?)</form>'
CNL2_FORMINPUT_REGEX = r'<input.*?name="%s".*?value="(.*?)"'
CNL2_JK_KEY = "jk"
CNL2_CRYPTED_KEY = "crypted"
DLC_LINK_REGEX = r'<a href=".*?" class="dlc_button" target="_blank">'
DLC_DOWNLOAD_URL = r'http://www.relink.us/download.php'
WEB_FORWARD_REGEX = r'getFile\(\'(.+)\'\)'
WEB_FORWARD_URL = r'http://www.relink.us/frame.php'
WEB_LINK_REGEX = r'<iframe name="Container" height="100%" frameborder="no" width="100%" src="(.+)"></iframe>'
def setup(self):
self.fileid = None
self.package = None
self.captcha = False
def decrypt(self, pyfile):
#: Init
self.init_package(pyfile)
#: Request package
self.request_package()
#: Check for online
if not self.is_online():
self.offline()
#: Check for protection
if self.is_password_protected():
self.unlock_password_protection()
self.handle_errors()
if self.is_captcha_protected():
self.captcha = True
self.unlock_captcha_protection()
self.handle_errors()
#: Get package name and folder
(package_name, folder_name) = self.get_package_info()
#: Extract package links
package_links = []
for sources in self.PREFERRED_LINK_SOURCES:
package_links.extend(self.handle_link_source(sources))
if package_links: #: Use only first source which provides links
break
package_links = set(package_links)
#: Pack
if package_links:
self.packages = [(package_name, package_links, folder_name)]
def init_package(self, pyfile):
self.fileid = re.match(self.__pattern__, pyfile.url).group('ID')
self.package = pyfile.package()
def request_package(self):
self.html = self.load(self.pyfile.url)
def is_online(self):
if self.OFFLINE_TOKEN in self.html:
self.log_debug("File not found")
return False
return True
def is_password_protected(self):
if self.PASSWORD_TOKEN in self.html:
self.log_debug("Links are password protected")
return True
def is_captcha_protected(self):
if self.CAPTCHA_TOKEN in self.html:
self.log_debug("Links are captcha protected")
return True
return False
def unlock_password_protection(self):
password = self.get_password()
self.log_debug("Submitting password [%s] for protected links" % password)
if password:
passwd_url = self.PASSWORD_SUBMIT_URL + "?id=%s" % self.fileid
passwd_data = {'id': self.fileid, 'password': password, 'pw': 'submit'}
self.html = self.load(passwd_url, post=passwd_data)
def unlock_captcha_protection(self):
self.log_debug("Request user positional captcha resolving")
captcha_img_url = self.CAPTCHA_IMG_URL + "?id=%s" % self.fileid
coords = self.captcha.decrypt(captcha_img_url, input_type="png", output_type='positional', ocr="CircleCaptcha")
self.log_debug("Captcha resolved, coords [%s]" % str(coords))
captcha_post_url = self.CAPTCHA_SUBMIT_URL + "?id=%s" % self.fileid
captcha_post_data = {'button.x': coords[0], 'button.y': coords[1], 'captcha': 'submit'}
self.html = self.load(captcha_post_url, post=captcha_post_data)
def get_package_info(self):
name = folder = None
#: Try to get info from web
m = re.search(self.FILE_TITLE_REGEX, self.html)
if m is not None:
title = m.group(1).strip()
if not self.FILE_NOTITLE in title:
name = folder = title
self.log_debug("Found name [%s] and folder [%s] in package info" % (name, folder))
#: Fallback to defaults
if not name or not folder:
name = self.package.name
folder = self.package.folder
self.log_debug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
#: Return package info
return name, folder
def handle_errors(self):
if self.PASSWORD_ERROR_ROKEN in self.html:
self.fail(_("Wrong password"))
if self.captcha:
if self.CAPTCHA_ERROR_ROKEN in self.html:
self.retry_captcha()
else:
self.captcha.correct()
def handle_link_source(self, source):
if source == "cnl2":
return self.handle_CNL2Links()
elif source == "dlc":
return self.handle_DLC_links()
elif source == "web":
return self.handle_WEB_links()
else:
self.error(_('Unknown source type "%s"') % source)
def handle_CNL2Links(self):
self.log_debug("Search for CNL2 links")
package_links = []
m = re.search(self.CNL2_FORM_REGEX, self.html, re.S)
if m is not None:
cnl2_form = m.group(1)
try:
(vcrypted, vjk) = self._get_cipher_params(cnl2_form)
for (crypted, jk) in zip(vcrypted, vjk):
package_links.extend(self._get_links(crypted, jk))
except Exception:
self.log_debug("Unable to decrypt CNL2 links", trace=True)
return package_links
def handle_DLC_links(self):
self.log_debug("Search for DLC links")
package_links = []
m = re.search(self.DLC_LINK_REGEX, self.html)
if m is not None:
container_url = self.DLC_DOWNLOAD_URL + "?id=%s&dlc=1" % self.fileid
self.log_debug("Downloading DLC container link [%s]" % container_url)
try:
dlc = self.load(container_url)
dlc_filename = self.fileid + ".dlc"
dlc_filepath = fs_join(self.pyload.config.get("general", "download_folder"), dlc_filename)
with open(dlc_filepath, "wb") as f:
f.write(dlc)
package_links.append(dlc_filepath)
except Exception:
self.fail(_("Unable to download DLC container"))
return package_links
def handle_WEB_links(self):
self.log_debug("Search for WEB links")
package_links = []
params = re.findall(self.WEB_FORWARD_REGEX, self.html)
self.log_debug("Decrypting %d Web links" % len(params))
for index, param in enumerate(params):
try:
url = self.WEB_FORWARD_URL + "?%s" % param
self.log_debug("Decrypting Web link %d, %s" % (index + 1, url))
res = self.load(url)
link = re.search(self.WEB_LINK_REGEX, res).group(1)
package_links.append(link)
except Exception, detail:
self.log_debug("Error decrypting Web link %s, %s" % (index, detail))
self.wait(4)
return package_links
def _get_cipher_params(self, cnl2_form):
#: Get jk
jk_re = self.CNL2_FORMINPUT_REGEX % self.CNL2_JK_KEY
vjk = re.findall(jk_re, cnl2_form, re.I)
#: Get crypted
crypted_re = self.CNL2_FORMINPUT_REGEX % RelinkUs.CNL2_CRYPTED_KEY
vcrypted = re.findall(crypted_re, cnl2_form, re.I)
#: Log and return
self.log_debug("Detected %d crypted blocks" % len(vcrypted))
return vcrypted, vjk
def _get_links(self, crypted, jk):
#: Get key
jreturn = self.js.eval("%s f()" % jk)
self.log_debug("JsEngine returns value [%s]" % jreturn)
key = binascii.unhexlify(jreturn)
#: Decrypt
Key = key
IV = key
obj = AES.new(Key, AES.MODE_CBC, IV)
text = obj.decrypt(crypted.decode('base64'))
#: Extract links
text = text.replace("\x00", "").replace("\r", "")
links = filter(bool, text.split('\n'))
#: Log and return
self.log_debug("Package has %d links" % len(links))
return links
|
esc/Bento | refs/heads/master | bento/private/_yaku/yaku/conf.py | 3 | import os
import sys
import re
try:
from hashlib import md5
except ImportError:
from md5 import md5
if sys.version_info[0] < 3:
from cStringIO \
import \
StringIO
else:
from io \
import \
StringIO
from yaku.errors \
import \
UnknownTask
from yaku.utils \
import \
ensure_dir
def create_file(conf, code, prefix="", suffix=""):
    """Write *code* into a build-dir node named after its MD5 digest.

    The digest makes the filename unique per code snippet, so repeated
    configure checks get stable, non-colliding file names.
    """
    digest = md5(code.encode()).hexdigest()
    node = conf.bld_root.declare(prefix + digest + suffix)
    node.write(code)
    return node
def with_conf_blddir(conf, name, body, func):
    """'Context manager' to execute a series of tasks into code-specific build
    directory.

    func must be a callable taking no arguments
    """
    previous_root, scratch_root = create_conf_blddir(conf, name, body)
    try:
        # Point both the conf and its build context at the scratch dir.
        conf.bld_root = scratch_root
        conf.bld_root.ctx.bldnode = scratch_root
        return func()
    finally:
        # Always restore the original roots, even when func() raises.
        conf.bld_root = previous_root
        conf.bld_root.ctx.bldnode = previous_root
def write_log(conf, log, tasks, code, succeed, explanation):
    """Append one configure-check record to the config log.

    Writes the checked code (prefixed for readability), the outcome, the
    explanation, and the command sequence with any captured stdout.
    """
    # Quote every source line so it stands out from the surrounding log.
    for line in code.splitlines():
        log.write(" |%s\n" % line)
    if succeed:
        log.write("---> Succeeded !\n")
    else:
        log.write("---> Failure !\n")
    log.write("~~~~~~~~~~~~~~\n")
    log.write(explanation)
    log.write("~~~~~~~~~~~~~~\n")
    # Buffer the command trace so it is emitted as one contiguous chunk.
    s = StringIO()
    s.write("Command sequence was:\n")
    for t in tasks:
        try:
            cmd = conf.get_cmd(t)
            s.write("%s\n" % " ".join(cmd))
            stdout = conf.get_stdout(t)
            if stdout:
                s.write("\n")
                for line in stdout.splitlines():
                    s.write("%s\n" % line)
                s.write("\n")
        except UnknownTask:
            # A task without a recorded command ends the trace.
            break
    log.write(s.getvalue())
    log.write("\n")
def create_conf_blddir(conf, name, body):
    """Create (if needed) a check-specific build dir and return
    (old_root, new_root) node objects."""
    # The directory name hashes the check name + code so distinct checks
    # get distinct scratch directories.
    dirname = ".conf-%s-%s" % (name, hash(name + body))
    abs_path = os.path.join(conf.bld_root.abspath(), dirname)
    if not os.path.exists(abs_path):
        os.makedirs(abs_path)
    scratch_root = conf.bld_root.make_node(dirname)
    return conf.bld_root, scratch_root
|
hehongliang/tensorflow | refs/heads/master | tensorflow/python/ops/parallel_for/control_flow_ops.py | 1 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""for_loop and pfor ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.parallel_for.pfor import PFor
from tensorflow.python.util import nest
def for_loop(loop_fn, loop_fn_dtypes, iters, parallel_iterations=None):
  """Runs `loop_fn` `iters` times and stacks the outputs.
  Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and
  stacks corresponding outputs of the different runs.
  Args:
    loop_fn: A function that takes an int32 scalar tf.Tensor object representing
      the iteration number, and returns a possibly nested structure of tensor
      objects. The shape of these outputs should not depend on the input.
    loop_fn_dtypes: dtypes for the outputs of loop_fn.
    iters: Number of iterations for which to run loop_fn.
    parallel_iterations: The number of iterations that can be dispatched in
      parallel. This knob can be used to control the total memory usage.
  Returns:
    Returns a nested structure of stacked output tensor objects with the same
    nested structure as the output of `loop_fn`.
  """
  # Flatten the dtype structure so outputs can be zipped with one
  # TensorArray per flattened output.
  flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes)
  # Records which outputs were None; mutated in place by while_body so
  # the final value is visible here after the loop finishes tracing.
  is_none_list = []
  def while_body(i, *ta_list):
    """Body of while loop."""
    fn_output = nest.flatten(loop_fn(i))
    if len(fn_output) != len(flat_loop_fn_dtypes):
      raise ValueError(
          "Number of expected outputs, %d, does not match the number of "
          "actual outputs, %d, from loop_fn" % (len(flat_loop_fn_dtypes),
                                                len(fn_output)))
    outputs = []
    # Reset in place (not rebind) so the closure over is_none_list works.
    del is_none_list[:]
    is_none_list.extend([x is None for x in fn_output])
    for out, ta in zip(fn_output, ta_list):
      # TODO(agarwal): support returning Operation objects from loop_fn.
      if out is not None:
        # Prepend a unit dim so concat() below stacks iterations on axis 0.
        ta = ta.write(i, array_ops.expand_dims(out, 0))
      outputs.append(ta)
    return tuple([i + 1] + outputs)
  # Only forward parallel_iterations when the caller set it, so the
  # while_loop default applies otherwise.
  if parallel_iterations is not None:
    extra_args = {"parallel_iterations": parallel_iterations}
  else:
    extra_args = {}
  ta_list = control_flow_ops.while_loop(
      lambda i, *ta: i < iters,
      while_body,
      [0] + [tensor_array_ops.TensorArray(dtype, iters)
             for dtype in flat_loop_fn_dtypes],
      **extra_args)[1:]
  # TODO(rachelim): enable this for sparse tensors
  output = [None if is_none else ta.concat()
            for ta, is_none in zip(ta_list, is_none_list)]
  return nest.pack_sequence_as(loop_fn_dtypes, output)
def _flatten_first_two_dims(x):
  """Flattens the first two dimensions of x into a single dimension."""
  shape = array_ops.shape(x)
  merged_dim = shape[0] * shape[1]
  flattened_shape = array_ops.concat([[merged_dim], shape[2:]], axis=0)
  return array_ops.reshape(x, flattened_shape)
def pfor(loop_fn, iters, parallel_iterations=None):
  """Equivalent to running `loop_fn` `iters` times and stacking the outputs.

  `pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters`
  times, with input from 0 to `iters - 1`, and stacking corresponding output of
  each iteration. However the implementation does not use a tf.while_loop.
  Instead it adds new operations to the graph that collectively compute the same
  value as what running `loop_fn` in a loop would compute.

  This is an experimental feature and currently has a lot of limitations:
    - There should be no data dependency between the different iterations. For
      example, a future iteration should not depend on a value or side-effect of
      a previous iteration.
    - Stateful kernels may mostly not be supported since these often imply a
      data dependency or ordering of the iterations. We do support a limited set
      of such stateful kernels though (like RandomFoo, Variable operations like
      reads, etc).
    - Conversion works only on a limited set of kernels for which a converter
      has been registered.
    - loop_fn has limited support for control flow operations. tf.cond in
      particular is not supported.
    - `loop_fn` should return nested structure of Tensors or Operations. However
      if an Operation is returned, it should have zero outputs.
    - The shape and dtype of `loop_fn` outputs should not depend on the input
      to loop_fn.

  Args:
    loop_fn: A function that takes an int32 scalar tf.Tensor object representing
      the iteration number, and returns a possibly nested structure of Tensor or
      Operation objects. Note that if setting `parallel_iterations` argument to
      something other than None, `loop_fn` may be called more than once during
      graph construction. So it may need to avoid mutating global state.
    iters: Number of iterations for which to run loop_fn.
    parallel_iterations: A knob to control how many iterations are vectorized
      and dispatched in parallel. The default value of None corresponds to
      vectorizing all the iterations. If `parallel_iterations` is smaller than
      `iters`, then chunks of at most that many iterations are dispatched in
      sequence. This knob can be used to control the total memory usage.

  Returns:
    Returns a nested structure of stacked tensor objects with the same nested
    structure as the output of `loop_fn`.
  Raises:
    ValueError: If parallel_iterations is not None and not an integer > 1.
  """
  # Trace the loop body once: the ops created by `loop_fn` (and only those)
  # are what the vectorizing converter below rewrites.
  existing_ops = set(ops.get_default_graph().get_operations())
  with ops.name_scope("loop_body"):
    loop_var = array_ops.placeholder(dtypes.int32, shape=[])
    loop_fn_outputs = loop_fn(loop_var)
  new_ops = set(ops.get_default_graph().get_operations()) - existing_ops
  iters = ops.convert_to_tensor(iters)
  if parallel_iterations is not None:
    if parallel_iterations < 1:
      raise ValueError("parallel_iterations must be None or a positive integer")
    if parallel_iterations == 1:
      raise ValueError("Found parallel_iterations == 1. Use for_loop instead.")
    # If we statically know there are fewer iterations than the chunk size,
    # tiling buys nothing: fall through to the fully vectorized path.
    iters_value = tensor_util.constant_value(iters)
    if iters_value is not None and iters_value < parallel_iterations:
      parallel_iterations = None
  if parallel_iterations is None:
    # Fully vectorized path: convert every traced op in a single pass.
    with ops.name_scope("pfor"):
      converter = PFor(loop_var, iters, new_ops)
      outputs = []
      for loop_fn_output in nest.flatten(loop_fn_outputs):
        outputs.append(converter.convert(loop_fn_output))
      return nest.pack_sequence_as(loop_fn_outputs, outputs)
  else:
    # Tiled path: vectorize the `iters % parallel_iterations` leftover
    # iterations up front, then loop over full tiles of size
    # `parallel_iterations`, vectorizing within each tile.
    num_tiled_iterations = iters // parallel_iterations
    num_remaining_iterations = iters % parallel_iterations
    # TODO(agarwal): Avoid calling loop_fn twice. Generate the loop body inside
    # a tf.function and extract the graph from there to vectorize it.
    with ops.name_scope("pfor_untiled"):
      converter = PFor(loop_var, num_remaining_iterations, new_ops)
      remaining_outputs = []
      flattened_loop_fn_outputs = nest.flatten(loop_fn_outputs)
      for loop_fn_output in flattened_loop_fn_outputs:
        remaining_outputs.append(converter.convert(loop_fn_output))
    with ops.name_scope("pfor_tiled"):
      loop_fn_dtypes = [ops.convert_to_tensor(x).dtype
                        for x in flattened_loop_fn_outputs]
      def tiled_loop_body(j):
        # Iteration `i` inside tile `j` corresponds to global iteration
        # `j * parallel_iterations + num_remaining_iterations + i`.
        offset = j * parallel_iterations + num_remaining_iterations
        def tiled_loop_fn(i):
          return nest.flatten(loop_fn(i + offset))
        return pfor(tiled_loop_fn, parallel_iterations)
      tiled_outputs = for_loop(tiled_loop_body, loop_fn_dtypes,
                               num_tiled_iterations, parallel_iterations=1)
      tiled_outputs = [_flatten_first_two_dims(y) for y in tiled_outputs]
    with ops.name_scope("pfor"):
      # Stitch the leftover (untiled) outputs in front of the tiled ones.
      # When the remainder is known statically to be zero, skip the cond.
      iters_value = tensor_util.constant_value(iters)
      if iters_value is None or iters_value % parallel_iterations:
        outputs = control_flow_ops.cond(
            math_ops.equal(num_remaining_iterations, 0),
            lambda: tiled_outputs,
            lambda: [array_ops.concat([x, y], axis=0)
                     for x, y in zip(remaining_outputs, tiled_outputs)])
      else:
        outputs = tiled_outputs
      return nest.pack_sequence_as(loop_fn_outputs, nest.flatten(outputs))
|
mekarpeles/waltz | refs/heads/master | waltz/treasury.py | 1 | #!/usr/bin/env python
"""
ballroom.treasury
~~~~~~~~~~~~~~~~~
The treasury contains several modules which, when joined, provide shopping cart function.
Product:
A Product is a generic interface which one may extend or inherit in order to
represent a physical or digital e-commerce product.
Item:
A wrapper which encapsulates and functions upon the base Product
class's attributes in order to interface as a unit of the Cart object.
Items can be bound to a coupon which may modify or adjust the total
item price according to the Item.qty (quantity), as well as the
specifications of the coupon.
Cart:
A shopping Cart is a collection of Items.
Coupon Calculation:
        The cart has a property() called 'total' which calculates the
cart total, with coupons applied, in realtime. First, the
        procedure iterates over each Item within the cart and invokes
the Item's total property (thus getting the adjusted price for
the Item, according to the item qty and coupons). After the
subtotal has been calculated for all Items, any cart level
coupons are then applied to this subtotal in order to yield the
        final total. Taxes, fees, and commissions are applied at this
point.
Coupon:
Coupons can be applied at both the Cart and the Item level. If
applied at the Cart level, the coupon will have the effect of a
value or percentage off of the entire Cart.value. If a Coupon is
applied to an Item, a 'limit' parameter may be specified as a
modifier to limit the max number of applications the Coupon can be
applied to an Item's products (e.g. given an Item having quantity
5, (i.e. Item.qty == 5) bound to a Coupon having a limit=3 may/can
only adjust (apply to) 3 of the 5 instances of the Product this
Item represents.
"""
__author__ = ["Michael E. Karpeles", "Stephen Balaban"]
__email__ = ["michael.karpeles@gmail.com", "mail@stephenbalaban.com"]
__version__ = "0.0.3"
from decimal import Decimal
from utils import Storage
class Coupon(object):
    """A discount, applicable either to a single Item or to a whole Cart."""

    def __init__(self, cid, code, percent_off=0, value_off="0.00", pids=None, limit=1):
        """
        params:
            value_off - either a string or a Decimal dollar value with precision of 2 digits
            pids - a list of product ids for which the coupon applies.
                   The default action is for the coupon to apply to the entire
                   cart / purchase.
            limit - the number of times this coupon should be applied to a product
        """
        self.id = cid  # database identifier of the coupon
        self.pids = pids
        self.code = code  # coupon code which the user supplies
        self.percent_off = percent_off
        self.value_off = Decimal(value_off)
        self.applications = limit

    @classmethod
    def apply(cls, coupon, price):
        """Applies a coupon to a single unit of an item (an item can
        have a qty > 1) and returns the adjusted price for that
        unit. In terms of order of operations, first subtracts any
        monetary value off discount and then afterwards applies the
        percentage off (if any specified in coupon) to the updated
        subtotal. The result is never negative.
        """
        if not coupon:
            return price
        dprice = Decimal(.01 * (100.0 - coupon.percent_off)) * (price - coupon.value_off)
        # truncate (round toward zero) to cents
        qdprice = (dprice).quantize(Decimal('.01'), rounding='ROUND_DOWN')
        if qdprice <= 0:
            return Decimal('0.00')
        # BUG FIX: the original fell off the end here and returned None for
        # every positive adjusted price; return the discounted unit price.
        return qdprice
class Product(object):
    """Necessary scaffolding for a Product in order to interface with
    the Cart. Feel free to extend this object to fit your needs

    params:
        upc (optional) - Standard UPC-A (see encoding - http://en.wikipedia.org/wiki/Universal_Product_Code#Encoding)
    """
    def __init__(self, pid, label, desc="", price="9.99", currency="USD", phash=None, upc=None):
        self.id = pid
        self.name = label
        self.description = desc
        self.price = Decimal(price)
        # BUG FIX: `currency` was accepted but never stored, even though Item
        # reads `product.currency` (falling back to "USD"); persist it here.
        self.currency = currency
        self.hash = phash  # still needs to be standardized (perhaps see web.py utils for hash ideas)
        self.upc = upc
class Item(object):
    """A Cart line: one Product plus a quantity, optional Coupon and referral.

    usage:
        >>> p = Product(1, 'shoes', desc='new pair of shoes',
        ...     price='13.37', phash='d81hda8z')
        >>> item = Item(product=p)
        >>> item.total
        '13.37'
    """
    def _verify(self, product=None, qty=1, coupon=None, **kwags):
        """Throws an exception if any of the parameters fail to pass
        verification. A failure may be the result of several
        occurrences, including unexpected types, undesired value
        ranges, signs (i.e. signed versus unsigned), etc.
        """
        #note refid is a user's id and default, -1, is nobody
        if product and not isinstance(product, Product):
            raise TypeError('product argument must inherit from the ' \
                            'Product class.')
        if coupon and not isinstance(coupon, Coupon):
            raise TypeError("Coupon was expected, instead encountered:" \
                            "<type: '%s'>" % type(coupon))
        # NOTE(review): this rebinds only the local `qty`; a non-int qty is
        # neither rejected nor coerced on the caller's side -- confirm intent.
        if type(qty) is not int:
            qty = 1
    def __init__(self, product=None, qty=1, coupon=None, ref=None):
        """
        params:
            product - an instance of an object which subclasses Product
            ref - referral id, for affiliates or referrers
        """
        # need to directly set (object.__setattr__) to avoid infinite loop:
        # our __setattr__ calls _verify, which reads attributes during init
        self._verify(product, qty, coupon)
        object.__setattr__(self, 'qty', qty)
        object.__setattr__(self, 'price', Decimal('0.00'))
        if not ref:
            ref = self.get_empty_ref()
        self.product = product
        self.coupon = coupon
        self.id = product.id
        self.qty = qty
        # price/currency are snapshotted from the product at construction time
        self.price = getattr(self.product, "price")
        self.currency = getattr(self.product, "currency", "USD")
        self.ref = ref
    def __repr__(self):
        # Human-readable dump of all attributes; returned as unicode (Python 2).
        result = "<Item id: "+ str(self.id) + ", total: " + str(self.total) \
                 + ", qty: " + str(self.qty) \
                 + ", price: " + str(self.price) \
                 + ", currency: " + str(self.currency) \
                 + ", ref: " + str(self.ref) \
                 + ", coupon: " + str(self.coupon) \
                 + ", products: " + str(self.product) + " >"
        return unicode(result)
    def __setattr__(self, name, value):
        # Route every assignment through _verify so invalid products/coupons
        # are rejected no matter where they are set.
        kwarg = {name: value}
        self._verify(**kwarg)
        object.__setattr__(self, name, value)
    @property
    def total(self):
        """Line total: coupon-adjusted price for up to `coupon.applications`
        units, full price for the remaining units."""
        ttl = Decimal('0.00')  # NOTE(review): unused accumulator, kept as-is
        applications = self.coupon.applications if self.coupon else 0
        if applications >= self.qty:
            return Coupon.apply(self.coupon, self.price) * self.qty
        return (Coupon.apply(self.coupon, self.price) * applications) + \
               (self.price * (self.qty - applications))
    @classmethod
    def get_empty_ref(self):
        """
        returns an empty ref (Storage of all-default fields), free to use!
        >>> Item.get_empty_ref()
        """
        # NOTE(review): classmethod whose first parameter is named `self`
        # (it actually receives the class); left untouched.
        default_refid = -1
        empty_ref = Storage({'id': default_refid,
                             'username': None,
                             'time': None,
                             'uri': None,
                             'tag': None })
        return empty_ref
class Cart(object):
    """A collection of Items with cart-level tax and coupon handling.

    main attributes:
        total - total value of cart denominated in cart.currency
        qty - total number of items in cart
        currency - cart currency symbol-code tuple ("$", "USD")
        taxrate - current taxrate of cart (regional basis)
        itemsdict - dict of current Items, keyed by product id

    main methods:
        add(product)
        remove(pid)
        empty()

    usage:
        >>> cart = Cart(taxrate=0, currency=('$','USD'))
        >>> cart.add(product)   # adds a product object + recalculates
        >>> cart.empty()        # empties cart, resets totals
        >>> cart.total          # total cost of all items in cart + coupons
        >>> cart.qty            # recalculates and returns qty of items in cart
        >>> cart.itemsdict[pid] # returns item object <Item qty: 1, total: ...
        >>> cart.__dict__       # dict representation
    """
    def _verify(self):
        # placeholder for future validation hooks
        pass

    def __init__(self, taxrate=Decimal('0.00'), currency=("$", "USD"),
                 qty=0, total=Decimal('0.00'), coupon=None):
        """
        params:
            taxrate - Decimal between 0.00 and 1.00
            currency - (symbol, code) tuple
            qty, total - accepted for backward compatibility but ignored;
                         both are computed properties on this class
            coupon - optional cart-level Coupon
        """
        if taxrate > Decimal('1.00') or taxrate < Decimal('0.00'):
            raise ValueError('taxrate is a decimal between 0.00 and 1.00')
        self.taxrate = taxrate
        self.currency = currency
        self.itemsdict = {}
        self.coupon = coupon

    def __repr__(self):
        result = '<Cart total: ' + str(self.total) \
                 + ', currency: ' + str(self.currency) \
                 + ', itemsdict: ' + str(map(self._repr_wrap, self.itemsdict.keys())) \
                 + ', qty: ' + str(self.qty) \
                 + ', taxrate: ' + str(self.taxrate) \
                 + ', coupon: ' + (str(self.coupon.id) if getattr(self, 'coupon', None) else 'None') \
                 + ', pythonid: ' + str(id(self)) + '>'
        return unicode(result)

    def _repr_wrap(self, thing):
        # wraps an itemsdict key for display inside __repr__
        return "<Item " + str(thing) + ">"

    def __setattr__(self, name, value):
        """
        controls attribute settings: qty must be a non-negative int
        """
        if name == 'qty':
            try:
                value = int(value)
            except ValueError as details:
                raise ValueError("please provide an int for qty")
            if value < 0:
                raise ValueError("qty cannot be less than zero")
        object.__setattr__(self, name, value)

    @property
    def currency_code(self):
        """returns currency code
        >>> cart.currency_code
        "USD"
        """
        return self.currency[1]

    @property
    def currency_symbol(self):
        """returns currency symbol
        >>> cart.currency_symbol
        "$"
        """
        return self.currency[0]

    def add(self, product=None, qty=1, ref=None):
        """
        adds product object, sets quantity and refid to cart

        plain vanilla product add
        >>> cart.add(product)

        add with kwargs:
        >>> cart.add(product=product, qty=3, ref=<Storage ...>)
        """
        if not product: return self
        if self.itemsdict.has_key(product.id):
            #if item exists, increment
            self.itemsdict[product.id].qty += qty
        else:
            item = Item(product=product, qty=qty, ref=ref)
            self.itemsdict[item.id] = item
        self.invariants()
        return self

    @property
    def total(self):
        """Dynamically calculates an up-to-date total in a piecewise
        fashion: sums each item's (coupon-adjusted) total, then applies
        any cart-level coupon to the subtotal."""
        subtotal = sum([item.total for k, item in self.itemsdict.items()])
        return Coupon.apply(self.coupon, subtotal)

    @property
    def qty(self):
        """Returns the total number of product units within the cart
        (the sum of every item's qty)."""
        return sum([item.qty for k, item in self.itemsdict.items()])

    def remove(self, pid, amt=None):
        """removes ALL of an item from cart based on product id

        >>> #using id
        >>> cart.remove(2) #pid = 2, an item in cart
        2
        >>> cart.remove(2) # an item no longer in cart raises KeyError
        Traceback (most recent call last):
          ...
        KeyError: 'no item found'

        Note: if you only want to remove some of the items in the cart,
        use the cart.set(id, qty) method instead.
        """
        if not (isinstance(pid, long) or isinstance(pid, int)):
            raise TypeError("Product ID must be an int or long; " \
                            "Instead received value: %s of " \
                            "type %s" % (pid, type(pid)))
        if not self.itemsdict.has_key(pid):
            raise KeyError('no item found')
        else:
            del self.itemsdict[pid]
        self.invariants()
        return self

    def get(self, pid=None, slug=None):
        """
        gets an Item based on product_id (or, failing that, its slug)

        >>> cart.get(pid) #pid = 2 (an item not in cart)
        Traceback (most recent call last):
          ...
        KeyError: 2
        >>> cart.get(2) #an item in cart
        <Item id: 2, total: ... >
        """
        if pid is None and slug is None:
            raise ValueError("please supply an pid or slug")
        key = -1
        if pid is None:
            # slug search O(n)
            for k,v in self.itemsdict.items():
                if v.product.name == slug:
                    key = k
        else:
            # pid O(1)
            # BUG FIX: long(pid) previously ran before the None checks, so
            # slug-only lookups crashed with TypeError; convert only when a
            # pid was actually supplied.
            key = long(pid)
        return self.itemsdict[key]

    def contains(self, pid=None):
        """
        returns boolean if itemsdict contains product id (pid)

        usage:
        >>> cart.add(product) # product.id = 3
        >>> cart.contains(3)
        True
        """
        # BUG FIX: the guard was inverted ("if pid: raise"), which made every
        # valid lookup raise ValueError; only complain when no pid is given.
        if pid is None:
            raise ValueError("please supply an pid or slug")
        return self.itemsdict.has_key(pid)

    @property
    def tax(self):
        # tax on the coupon-adjusted total, truncated to cents
        return Decimal(self.total * self.taxrate).quantize(Decimal('.01'), rounding='ROUND_DOWN')

    def invariants(self):
        """tests cart invariants -- TODO"""
        pass

    def empty(self):
        """
        empties this cart, keeps taxrate and currency, returns the cart

        >>> cart.empty()
        <Cart total: 0.00, currency: ('$', 'USD'), qty: 0, itemsdict: {},
        taxrate: 0.00, coupon: None>
        """
        return self.nullify()

    def nullify(self):
        """
        Purges all items and any cart-level coupon; returns this cart.
        """
        self.coupon = None
        self.itemsdict = {}
        # BUG FIX: empty() documents that it returns the (now empty) cart,
        # but nullify() previously propagated None; return self instead.
        return self

    def make_receipt(self):
        """
        Return a string detailing all of the items in the cart
        and their costs to be used as a receipt.
        """
        desc = ""
        for index, item in enumerate(self.itemsdict):
            if index == 0:
                desc = "item #%s - %s - $%s" % (self.itemsdict[item].product.hash,
                                                self.itemsdict[item].product.name,
                                                self.itemsdict[item].price)
            else:
                desc = "%s, item #%s - %s - $%s" % (desc, self.itemsdict[item].product.hash,
                                                    self.itemsdict[item].product.name,
                                                    self.itemsdict[item].price)
        return desc
|
vikas1885/test1 | refs/heads/master | common/lib/xmodule/xmodule/util/duedate.py | 256 | """
Miscellaneous utility functions.
"""
from functools import partial
def get_extended_due_date(node):
    """
    Gets the actual due date for the logged in student for this node, returning
    the extended due date if one has been granted and it is later than the
    global due date, otherwise returning the global due date for the unit.
    """
    # dicts are read with .get; everything else with getattr-and-default
    if isinstance(node, dict):
        read = node.get
    else:
        read = lambda field, default: getattr(node, field, default)
    due_date = read('due', None)
    if not due_date:
        # no global due date set: propagate whatever falsy value we found
        return due_date
    extension = read('extended_due', None)
    # only honour an extension that exists and does not precede the global date
    return extension if extension and extension >= due_date else due_date
|
gezb/osmc | refs/heads/master | package/mediacenter-addon-osmc/src/script.module.osmcsetting.remotes/resources/lib/remote_gui.py | 6 |
# KODI modules
import xbmc
import xbmcaddon
import xbmcgui
# Standard modules
import sys
import os
import shutil
import subprocess
import threading
# Addon identity and Kodi bootstrap objects
addonid = "script.module.osmcsetting.remotes"
__addon__ = xbmcaddon.Addon("script.module.osmcsetting.remotes")
__path__ = xbmc.translatePath(xbmcaddon.Addon(addonid).getAddonInfo('path'))
DIALOG = xbmcgui.Dialog()
# Custom module path
sys.path.append(os.path.join(__path__, 'resources','lib'))
# OSMC SETTING Modules
from CompLogger import comprehensive_logger as clog
# Kodi action / control id constants used by the dialogs below
ACTION_PREVIOUS_MENU = 10
ACTION_NAV_BACK = 92
SAVE = 5
HEADING = 1
ACTION_SELECT_ITEM = 7
# Live lircd paths; fall back to a developer's local copy when /etc/lirc
# is absent (i.e. when running outside an OSMC device).
LIRCD_PATH = '/etc/lirc/lircd.conf'
ETC_LIRC = '/etc/lirc'
if not os.path.isdir(ETC_LIRC):
	LIRCD_PATH = '/home/plaskev/temp/lirc/lircd.conf'
	ETC_LIRC = '/home/plaskev/temp/lirc'
def log(message):
	"""Write *message* to the Kodi debug log with a 'REMOTE: ' prefix."""
	try:
		text = str(message)
	except UnicodeEncodeError:
		# Python 2: unicode containing non-ASCII cannot be str()'d directly
		text = message.encode('utf-8', 'ignore')
	xbmc.log('REMOTE: ' + str(text), level=xbmc.LOGDEBUG)
@clog(log)
def lang(id):
	"""Return the addon's localized string for *id*, UTF-8 encoded."""
	localised = __addon__.getLocalizedString(id)
	return localised.encode('utf-8', 'ignore')
def construct_listitem(conf):
	"""Build an xbmcgui.ListItem describing the remote conf file at *conf*.

	label is a friendly name (from a leading "# name:" line if present),
	label2 carries the filename/path, and the 'fullpath' property always
	holds the absolute conf path.
	"""
	folder, fname = os.path.split(conf)
	with open(conf, 'r') as conf_file:
		lines = conf_file.readlines()
	# a conf may declare a display name on its first line: "# name: <label>"
	if lines[0].startswith("# name:"):
		label = lines[0][len("# name:"):]
		label2 = fname
	else:
		label = fname.replace('.conf', '')
		label2 = conf
	# use a sibling <name>.png as the thumbnail when one exists
	thumb = os.path.join(folder, fname.replace('.conf', '.png'))
	if os.path.isfile(thumb):
		item = xbmcgui.ListItem(label=label, label2=label2, thumbnailImage=thumb)
	else:
		item = xbmcgui.ListItem(label=label, label2=label2)
	item.setProperty('fullpath', conf)
	# stash (up to) the first 100 lines so the dialog can preview the conf
	item.setInfo('video', {'title': ''.join(lines[:100])})
	return item
def test_custom(conf):
	''' Returns a boolean indicating whether the supplied conf file is a custom conf file. '''
	try:
		parent_dir = os.path.split(conf)[0]
	except:
		# unsplittable input (e.g. None) is never treated as custom
		return False
	# anything living outside /etc/lirc is a user-supplied (custom) conf
	return parent_dir != ETC_LIRC
class remote_gui_launcher(object):
	"""Gathers the remote conf files available on disk, builds the selection
	window, and keeps re-opening it while `reopen` is set."""

	def __init__(self):
		# flag to indicate whether the GUI should re-open upon close. This is for when the remote changes do not stick.
		self.reopen = True
		# container for any confs we want to ignore
		self.excluded = ['lircd.conf']
		# lircd.conf is a symlink to the active conf; resolve it
		self.active_conf = os.path.realpath(LIRCD_PATH)
		# check if the target file actually exists, if it doesnt, then set the active conf file as None,
		# if it does, then check whether it is a custom file
		if os.path.isfile(self.active_conf):
			custom = test_custom(self.active_conf)
		else:
			custom = False
			self.active_conf = None
		# get the contents of /etc/lirc/
		local_confs_base = os.listdir(ETC_LIRC)
		local_confs_raw = [os.path.join(ETC_LIRC, conf) for conf in local_confs_base]
		local_confs_raw.sort()
		# filter list by files with size (this just removes any empty confs)
		local_confs = []
		for conf in local_confs_raw:
			if os.path.basename(conf) in self.excluded: continue
			if not conf.endswith('.conf'): continue
			try:
				if os.stat(conf).st_size == 0: continue
			except:
				continue
			local_confs.append(construct_listitem(conf))
		if custom:
			# self.active_conf can only be None if custom is False, so there is no risk in this
			# reconstruction of the local_confs
			local_confs = [construct_listitem(self.active_conf)] + local_confs
		# pick the window layout matching the skin's resolution
		xml = "RemoteBrowser_720OSMC.xml" if xbmcgui.Window(10000).getProperty("SkinHeight") == '720' else "RemoteBrowser_OSMC.xml"
		self.remote_gui = remote_GUI(xml, __path__, 'Default', local_confs=local_confs, active_conf=self.active_conf)

	def open_gui(self):
		# show the dialog; re-show it whenever something sets `reopen` again
		while self.reopen:
			self.reopen = False
			self.remote_gui.doModal()
class remote_GUI(xbmcgui.WindowXMLDialog):
	"""The remote-conf selection window: lists confs, lets the user test a
	selection (or browse for one), and toggles RC6 blacklisting.

	NOTE(review): xbmcgui.WindowXMLDialog.__init__ is not called explicitly;
	Kodi consumes the xml/path args via object construction -- confirm this
	matches the Kodi API version in use.
	"""

	def __init__(self, strXMLname, strFallbackPath, strDefaultName, local_confs, active_conf):
		self.local_confs = local_confs
		self.active_conf = active_conf
		# RC6 decoder blacklist file and its parent directory
		self.rc6_file = '/etc/modprobe.d/blacklist-rc6.conf'
		self.rc6_file_loc = '/etc/modprobe.d'
		# conf currently being tried out (None when nothing is under test)
		self.remote_selection = None

	def onInit(self):
		"""Populate the list control and sync the RC6 radiobutton."""
		self.list = self.getControl(500)
		self.list.setVisible(True)
		for i, x in enumerate(self.local_confs):
			self.list.addItem(x)
		self.highlight_selected()
		try:
			self.getControl(50).setVisible(False)
		except:
			pass
		# check for RC6 file, then set the radiobutton appropriately
		if os.path.isfile(self.rc6_file):
			log('RC6 blacklist file located')
			self.getControl(8).setSelected(True)
		else:
			log('RC6 blacklist file not found')
			self.getControl(8).setSelected(False)

	def find_custom_item(self):
		"""Return (index, listitem) of the first custom conf in the list.

		NOTE(review): on a miss this returns (0, 'failed'); 'failed' is a
		truthy value, so the `if custom:` caller below would still remove
		item 0 -- confirm intended behaviour.
		"""
		log('Finding custom item in list')
		for i in range(0,self.list.size()):
			tmp = self.list.getListItem(i)
			tmp_path = tmp.getLabel2()
			if test_custom(tmp_path):
				log('Custom item found')
				return i, tmp
		return 0, 'failed'

	def highlight_selected(self):
		"""Mark only the list entry matching the active conf as selected."""
		log('Changing highlighting to %s' % self.active_conf)
		for i in range(0,self.list.size()):
			tmp = self.list.getListItem(i)
			tmp_path = tmp.getLabel2()
			# if self.active_conf is None (i.e. the user deleted it externally) then no item will be selected
			if self.active_conf == tmp_path:
				tmp.select(True)
			else:
				tmp.select(False)

	def rc6_handling(self):
		"""Create or remove the RC6 blacklist file per the radiobutton."""
		sel = self.getControl(8).isSelected()
		if sel:
			# always overwrite the file, this will allow the contents to be updated (if ever needed)
			log('Creating RC6 blacklist file')
			with open('/var/tmp/blacklist-rc6.conf', 'w') as f:
				f.write('blacklist ir_rc6_decoder\ninstall ir_rc6_decoder /bin/true')
			subprocess.call(["sudo", "mv", '/var/tmp/blacklist-rc6.conf', self.rc6_file_loc])
			log('RC6 blacklist file moved')
		else:
			log('RC6 blacklist file removed')
			subprocess.call(["sudo", "rm", "-f", self.rc6_file])

	def onClick(self, controlID):
		"""Dispatch clicks: 500 = conf list, 7 = exit, 62 = browse."""
		if controlID == 500:
			# user has selected a local file from /etc/lirc
			self.remote_selection = self.getControl(500).getSelectedItem().getProperty('fullpath')
			result = self.test_selection()
			if result == 'success':
				log('User confirmed the remote changes work')
				# change the highlighted remote to the new selection
				self.active_conf = self.remote_selection
			elif result == 'service_dead':
				log('Remote service failed to restart.')
				ok = DIALOG.ok(lang(32006), lang(32013))
				self.remote_selection = None
			else:
				log('User did not confirm remote changes')
				self.remote_selection = None
			self.highlight_selected()
		elif controlID == 7:
			# user has selected Exit
			self.rc6_handling()
			self.remote_selection = None
			self.close()
		elif controlID == 62:
			# user has chosen to browse for the file
			log('User is browsing for remote conf')
			browser = xbmcgui.Dialog().browse(1, lang(32005), 'files', mask='.conf')
			if browser:
				log('User selected remote conf: %s' % self.remote_selection)
				self.remote_selection = browser
				result = self.test_selection()
				if result == 'success':
					log('user confirmed the remote changes work')
					# change the highlighted remote to the new selection
					self.active_conf = self.remote_selection
					# see if there is a custom file in the list, delete it if there is
					i, custom = self.find_custom_item()
					if custom:
						self.list.removeItem(i)
					# add the new custom as an item
					# self.active_conf cannot be None at this point, as the user must have selected one
					tmp = construct_listitem(self.active_conf)
					self.list.addItem(tmp)
					self.highlight_selected()
				elif result == 'service_dead':
					log('Remote service failed to restart.')
					ok = DIALOG.ok(lang(32006), lang(32013))
					self.remote_selection = None
				else:
					self.remote_selection = None
			else:
				self.remote_selection = None

	def test_selection(self):
		"""Symlink lircd.conf to the candidate, run the test dialog, and
		revert on failure. Returns 'success', 'failed' or 'service_dead'."""
		log('Testing remote conf selection: %s' % self.remote_selection)
		if os.path.isfile(self.remote_selection):
			# read the symlink target so we can revert later
			original_target = os.readlink( LIRCD_PATH )
			log('Original lircd_path target: %s' % original_target)
			# symlink the master conf to the new selection
			subprocess.call(['sudo', 'ln', '-sf', self.remote_selection, LIRCD_PATH])
			# open test dialog
			xml = "OSMC_remote_testing720.xml" if xbmcgui.Window(10000).getProperty("SkinHeight") == '720' else "OSMC_remote_testing.xml"
			self.remote_test = remote_test(xml, __path__, 'Default', self.remote_selection)
			self.remote_test.doModal()
			log('Testing complete, result: %s' % self.remote_test.test_successful)
			# if the test wasnt successful, then revert to the previous conf
			if not self.remote_test.test_successful:
				subprocess.call(['sudo', 'ln', '-sf', original_target, LIRCD_PATH])
				subprocess.call(['sudo', 'systemctl', 'restart', 'lircd_helper@*'])
				# add busy dialog, loop until service restarts
				if not self.remote_test.service_running:
					return 'service_dead'
				else:
					return 'failed'
			return 'success'
		return 'failed'
class remote_test(xbmcgui.WindowXMLDialog):
	"""Dialog shown while a candidate conf is live: the user must click the
	Confirm button (proving the remote still works) before time runs out.

	control IDs:
	# 90 restarting service label
	# 91 service restarted label, informs the user that the service has restarted and to confirm using the test button
	# 25 test button, user clicks this to confirm that the remotes changes have been successful
	# 45 countdown label, is controlled by the timer, and counts down the seconds to revert
	# 55 quick revert button
	"""

	def __init__(self, strXMLname, strFallbackPath, strDefaultName, selection):
		# outcome flags read by the caller after doModal() returns
		self.test_successful = False
		self.service_running = True
		self.selection = selection
		# seconds the user has to confirm before the conf is reverted
		self.countdown_limit = 20
		self.quick_revert = False
		self.countdown_timer = countdown_timer(self)
		self.countdown_timer.setDaemon(True)
		# setup the service checker straight away
		self.service_checker = service_checker(self)
		self.service_checker.setDaemon(True)

	def onInit(self):
		"""Grab control handles, show the initial state, start the checker."""
		log('Opening test dialog')
		self.restarting_service_label = self.getControl(90)
		self.check_remote_label = self.getControl(91)
		# self.countdown_label = self.getControl(45)
		self.test_button = self.getControl(25)
		self.progress_bar = self.getControl(101)
		self.initial_state()
		# start the service_checker AFTER the class attributes have been set (prevents race condition)
		self.service_checker.start()

	def initial_state(self):
		''' the dialog is telling the user that the service if restarting, and to please wait '''
		log('Setting initial state of test dialog')
		# change label to say remote service restarting
		self.progress_bar.setVisible(False)
		self.restarting_service_label.setVisible(True)
		self.check_remote_label.setVisible(False)
		self.test_button.setVisible(False)
		# self.countdown_label.setVisible(False)
		# self.countdown_label.setLabel(str(self.countdown_limit))

	def second_state(self):
		''' the service has been confirmed to be running again, and now the dialog is telling the user to click on
			on the Confirm button. This will confirm that they have been able to navigate down to the button, and
			click on it. '''
		log('Setting second state of test dialog')
		# change the label to say that the remote service has restarted and does the user want to keep the changes
		self.restarting_service_label.setVisible(False)
		self.check_remote_label.setVisible(True)
		self.progress_bar.setVisible(True)
		self.test_button.setVisible(True)
		# display the exit button (controlID 25)
		# self.countdown_label.setVisible(True)
		# start the timer
		self.countdown_timer.start()

	def service_dead_state(self):
		''' the service has not been detected to have started within 20 seconds.
			inform the user with OK style dialog
		'''
		log('Service is dead')
		self.service_running = False
		self.close()

	def onClick(self, controlID):
		# 25 = confirm success; 55 = quick revert
		if controlID == 25:
			''' user has clicked the test successful button, keep the changes,
				this is the only place that the new conf can be confirmed
			'''
			log('User has confirmed that the new conf is working.')
			self.test_successful = True
			self.countdown_timer.exit = True
			try:
				self.service_checker.exit = True
			except:
				pass
			self.close()
		elif controlID == 55:
			''' The user has decided to end the test, and would like to revert to the previous conf. This
				is likely to only occur while the service is being checked.
			'''
			log('User has decided to revert to the previous conf.')
			try:
				self.service_checker.exit = True
			except:
				pass
			self.countdown_timer.exit = True
			self.close()
class service_checker(threading.Thread):
	''' Restarts the remote service, and waits for the response that it is running. '''

	def __init__(self, parent):
		super(service_checker, self).__init__(name='service_checker')
		self.parent = parent
		# set by the dialog to abandon the wait early
		self.exit = False

	def run(self):
		"""Poll the restart process for up to ~10s (40 x 250ms); on clean
		exit advance the dialog, otherwise report the service as dead."""
		log('Remote service checker thread active.')
		counter = 0
		# restart the service for the changes to take effect
		proc = subprocess.Popen(['sudo', 'systemctl', 'restart', 'lircd_helper@*'])
		# loop until the service has restarted (or too much time has elapsed, in which case fail out)
		# NOTE(review): a non-zero, non-None poll result neither increments
		# the counter nor breaks, so the loop would spin -- confirm intent.
		while counter < 40 and not self.exit:
			p = proc.poll()
			if p is None:
				counter += 1
				xbmc.sleep(250)
				continue
			elif p == 0:
				break
		else:
			# while/else: reached only when the loop ended WITHOUT a break,
			# i.e. the process timed out or the exit signal was received
			if counter >= 40:
				# this is reached if the counter reaches 40, meaning the process check timed out
				self.parent.service_dead_state()
			elif self.exit:
				# this occurs when the user has clicked cancel or back
				# there is no need to do anything
				pass
			elif p != 0:
				# this occurs if there is an error code returned by the process
				log('Error code from systemctl restart lircd-helper: %s' % p)
			return
		# this point is only reached if proc.poll returns 0 (restart succeeded)
		self.parent.second_state()
class countdown_timer(threading.Thread):
	"""Background thread that shrinks the parent dialog's progress bar once
	per second, then closes the dialog when the countdown elapses."""

	def __init__(self, parent):
		super(countdown_timer, self).__init__(name='countdown_timer')
		self.parent = parent
		self.exit = False
		self.countdown = self.parent.countdown_limit

	def run(self):
		"""Tick once per second until the countdown hits zero or `exit` is
		set, updating the progress bar width each tick, then close."""
		log('Countdown timer thread active')
		while not self.exit and self.countdown:
			remaining = self.countdown
			self.parent.progress_bar.setWidth(remaining * 60)
			xbmc.sleep(1000)
			self.countdown = remaining - 1
		self.parent.close()
|
infincia/AEServmon | refs/heads/master | frontpage.py | 1 | #!/usr/bin/env python
# Copyright (c) 2009, Steve Oliver (steve@xercestech.com)
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY STEVE OLIVER ''AS IS'' AND ANY
#EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL STEVE OLIVER BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cgi
import wsgiref.handlers
from datetime import datetime
import os
from models import Server
from google.appengine.ext import webapp
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext.webapp import template
class MainHandler(webapp.RequestHandler):
  """Renders the front page with the list of monitored servers."""

  def get(self):
    """Handle GET / by rendering frontpage.html for the current user."""
    servers = db.GqlQuery("SELECT * FROM Server")
    current_user = users.get_current_user()
    context = {
        'user': current_user,
        'serverlist': servers,
    }
    template_path = os.path.join(os.path.dirname(__file__), 'frontpage.html')
    self.response.out.write(template.render(template_path, context))
def main():
  """Build the WSGI application and serve it through the CGI adapter."""
  app = webapp.WSGIApplication([('/', MainHandler)], debug=True)
  wsgiref.handlers.CGIHandler().run(app)
# Script entry point: run the CGI application when executed directly.
if __name__ == '__main__':
  main()
|
tomncooper/heron | refs/heads/master | heronpy/api/tests/python/topology_unittest.py | 5 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
# pylint: disable=protected-access
import os
import unittest
from heronpy.api.serializer import default_serializer
from heronpy.api.topology import Topology, TopologyBuilder, TopologyType
from heronpy.api.stream import Stream, Grouping
from heronpy.api.component.component_spec import HeronComponentSpec
from heronpy.proto import topology_pb2
# required environment variable
# note that this test doesn't write anything to /tmp directory
# (HERON_OPTIONS is presumably read when the Topology subclasses below are
# defined, so it must be set before those class statements execute — see
# test_get_heron_options_from_env, which shows a RuntimeError when unset)
heron_options = "cmdline.topologydefn.tmpdirectory=/tmp,cmdline.topology.initial.state=RUNNING"
os.environ["HERON_OPTIONS"] = heron_options
# Declarative sanity-check topology: one spout ("spout", parallelism 3) with a
# default output stream plus an "error_stream", feeding one bolt ("bolt",
# parallelism 4). No class docstring on purpose: the Topology metaclass
# consumes the class dict, so only plain '#' comments are added here.
class TestSane(Topology):
  # topology-wide config; "spout.overriden.config" is set again per-spout
  # below, and TopologyTest expects the spout-level value to win there
  config = {"topology.wide.config.1": "value",
            "spout.overriden.config": True}
  spout = HeronComponentSpec(None, "sp_class", True, 3, inputs=None,
                             outputs=["word", "count",
                                      Stream(fields=['error_msg'], name='error_stream')],
                             config={"spout.specific.config.1": "value",
                                     "spout.specific.config.2": True,
                                     "spout.specific.config.3": -12.4,
                                     "spout.specific.config.4": [1, 2, 3],
                                     "spout.overriden.config": False})
  bolt = HeronComponentSpec(None, "bl_class", False, 4,
                            inputs={spout: Grouping.SHUFFLE, spout['error_stream']: Grouping.ALL})
# pylint: disable=no-member
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
class TopologyTest(unittest.TestCase):
  """Tests for TopologyType: spec collection, protobuf emission, config sanitizing."""
  def setUp(self):
    os.environ["HERON_OPTIONS"] = heron_options
  def tearDown(self):
    os.environ.pop("HERON_OPTIONS", None)
  def test_sane_topology(self):
    """End-to-end check of the protobuf produced for the TestSane topology."""
    self.assertEqual(TestSane.topology_name, "TestSane")
    # topology-wide config
    # NOTE: copy the default config — the original code updated
    # TopologyType.DEFAULT_TOPOLOGY_CONFIG in place, polluting every
    # Topology class defined after this test ran.
    expecting_topo_config = dict(TopologyType.DEFAULT_TOPOLOGY_CONFIG)
    expecting_topo_config.update({"topology.wide.config.1": "value",
                                  "spout.overriden.config": "true"})
    self.assertEqual(TestSane._topo_config, expecting_topo_config)
    self.assertEqual(len(TestSane._protobuf_bolts), 1)
    self.assertEqual(len(TestSane._protobuf_spouts), 1)
    self.assertEqual(len(TestSane._heron_specs), 2)
    for spec in TestSane._heron_specs:
      if spec.is_spout:
        self.assertEqual(spec.name, "spout")
        self.assertEqual(spec.python_class_path, "sp_class")
        self.assertEqual(spec.parallelism, 3)
      else:
        self.assertEqual(spec.name, "bolt")
        self.assertEqual(spec.python_class_path, "bl_class")
        self.assertEqual(spec.parallelism, 4)
    self.assertIsInstance(TestSane.protobuf_topology, topology_pb2.Topology)
    proto_topo = TestSane.protobuf_topology
    ### spout protobuf ###
    self.assertEqual(len(proto_topo.spouts), 1)
    spout = proto_topo.spouts[0]
    self.assertEqual(spout.comp.name, "spout")
    self.assertEqual(spout.comp.spec, topology_pb2.ComponentObjectSpec.Value("PYTHON_CLASS_NAME"))
    self.assertEqual(spout.comp.class_name, "sp_class")
    # all config values are serialized to strings; lists go through the
    # default serializer as PYTHON_SERIALIZED_VALUE entries
    expecting_spout_config = {"topology.component.parallelism": "3",
                              "spout.specific.config.1": "value",
                              "spout.specific.config.2": "true",
                              "spout.specific.config.3": "-12.4",
                              "spout.specific.config.4": default_serializer.serialize([1, 2, 3]),
                              "spout.overriden.config": "false"}
    self.assertEqual(len(spout.comp.config.kvs), len(expecting_spout_config))
    for conf in spout.comp.config.kvs:
      value = expecting_spout_config[conf.key]
      if conf.type == topology_pb2.ConfigValueType.Value("STRING_VALUE"):
        self.assertEqual(value, conf.value)
      elif conf.type == topology_pb2.ConfigValueType.Value("PYTHON_SERIALIZED_VALUE"):
        self.assertEqual(value, conf.serialized_value)
      else:
        self.fail()
    # output stream: the implicit "default" stream plus "error_stream"
    self.assertEqual(len(spout.outputs), 2)
    for out_stream in spout.outputs:
      if out_stream.stream.id == "default":
        self.assertEqual(out_stream.stream.component_name, "spout")
        self.assertEqual(len(out_stream.schema.keys), 2)
      else:
        self.assertEqual(out_stream.stream.id, "error_stream")
        self.assertEqual(out_stream.stream.component_name, "spout")
        self.assertEqual(len(out_stream.schema.keys), 1)
    ### bolt protobuf ###
    self.assertEqual(len(proto_topo.bolts), 1)
    bolt = proto_topo.bolts[0]
    self.assertEqual(bolt.comp.name, "bolt")
    self.assertEqual(bolt.comp.spec, topology_pb2.ComponentObjectSpec.Value("PYTHON_CLASS_NAME"))
    self.assertEqual(bolt.comp.class_name, "bl_class")
    expecting_bolt_config = {"topology.component.parallelism": "4"}
    self.assertEqual(len(bolt.comp.config.kvs), len(expecting_bolt_config))
    conf = bolt.comp.config.kvs[0]
    self.assertEqual(conf.type, topology_pb2.ConfigValueType.Value("STRING_VALUE"))
    self.assertEqual(conf.value, expecting_bolt_config[conf.key])
    # out stream
    self.assertEqual(len(bolt.outputs), 0)
    # in stream: SHUFFLE on the default stream, ALL on error_stream
    self.assertEqual(len(bolt.inputs), 2)
    for in_stream in bolt.inputs:
      if in_stream.stream.id == "default":
        self.assertEqual(in_stream.stream.component_name, "spout")
        self.assertEqual(in_stream.gtype, topology_pb2.Grouping.Value("SHUFFLE"))
      else:
        self.assertEqual(in_stream.stream.id, "error_stream")
        self.assertEqual(in_stream.stream.component_name, "spout")
        self.assertEqual(in_stream.gtype, topology_pb2.Grouping.Value("ALL"))
    self.assertEqual(proto_topo.state, topology_pb2.TopologyState.Value("RUNNING"))
  def test_no_spout(self):
    """A topology with only bolts must be rejected at class-definition time."""
    with self.assertRaises(ValueError):
      # pylint:disable = unused-variable
      class JustBolt(Topology):
        bolt = HeronComponentSpec(None, "bl_class", False, 4)
  def test_class_dict_to_specs(self):
    """Duplicate component names must be rejected."""
    class_dict = {"spout": HeronComponentSpec("same_name", "sp_cls", True, 1),
                  "bolt": HeronComponentSpec("same_name", "bl_cls", False, 2)}
    with self.assertRaises(ValueError):
      TopologyType.class_dict_to_specs(class_dict)
  def test_add_spout_specs(self):
    """A spout with no output stream must be rejected."""
    spec = HeronComponentSpec("spout", "sp_cls", True, 1)
    with self.assertRaises(ValueError):
      TopologyType.add_spout_specs(spec, {})
  def test_add_bolt_specs(self):
    """A bolt with no input stream must be rejected."""
    spec = HeronComponentSpec("bolt", "bl_cls", False, 1)
    with self.assertRaises(ValueError):
      TopologyType.add_bolt_specs(spec, {})
  def test_sanitize_config(self):
    """Keys must be strings; primitive values are stringified, others kept."""
    # non-string key
    with self.assertRaises(TypeError):
      TopologyType._sanitize_config({['k', 'e', 'y']: "value"})
    with self.assertRaises(TypeError):
      TopologyType._sanitize_config({None: "value"})
    # convert boolean value
    ret = TopologyType._sanitize_config({"key": True})
    self.assertEqual(ret["key"], "true")
    ret = TopologyType._sanitize_config({"key": False})
    self.assertEqual(ret["key"], "false")
    # convert int and float
    ret = TopologyType._sanitize_config({"key": 10})
    self.assertEqual(ret["key"], "10")
    ret = TopologyType._sanitize_config({"key": -2400000})
    self.assertEqual(ret["key"], "-2400000")
    ret = TopologyType._sanitize_config({"key": 0.0000001})
    self.assertEqual(ret["key"], "1e-07")
    ret = TopologyType._sanitize_config({"key": -15.33333})
    self.assertEqual(ret["key"], "-15.33333")
    # non-string value -> should expect the same object
    ret = TopologyType._sanitize_config({"key": ['v', 'a', 'l', 'u', 'e']})
    self.assertEqual(ret["key"], ['v', 'a', 'l', 'u', 'e'])
    ret = TopologyType._sanitize_config({"key": None})
    self.assertEqual(ret["key"], None)
  def test_get_heron_options_from_env(self):
    """%%%% in HERON_OPTIONS decodes to a space; a missing variable raises."""
    test_value = "cmdline.key.1=/tmp/directory,cmdline.with.space=hello%%%%world"
    expecting = {"cmdline.key.1": "/tmp/directory", "cmdline.with.space": "hello world"}
    os.environ["HERON_OPTIONS"] = test_value
    ret = TopologyType.get_heron_options_from_env()
    self.assertEqual(ret, expecting)
    # error
    os.environ.pop("HERON_OPTIONS")
    with self.assertRaises(RuntimeError):
      TopologyType.get_heron_options_from_env()
class TopologyBuilderTest(unittest.TestCase):
  """Tests for the TopologyBuilder API."""

  def test_constructor(self):
    """A valid name is accepted; reserved or non-string names are rejected."""
    wordcount_builder = TopologyBuilder("WordCount")
    self.assertEqual(wordcount_builder.topology_name, "WordCount")
    for bad_name in ("Topology", 123, None):
      with self.assertRaises(AssertionError):
        TopologyBuilder(bad_name)

  def test_add_spec(self):
    """Unnamed or non-spec arguments are rejected; valid specs accumulate."""
    builder = TopologyBuilder("Test")
    with self.assertRaises(ValueError):
      builder.add_spec(HeronComponentSpec(None, "path", True, 1))
    with self.assertRaises(TypeError):
      builder.add_spec(None)
    self.assertEqual(len(builder._specs), 0)
    # adding ten named specs in one call should register all of them
    named_specs = [HeronComponentSpec(str(i), "path", True, 1)
                   for i in range(10)]
    builder.add_spec(*named_specs)
    self.assertEqual(len(builder._specs), 10)
self.assertEqual(len(builder._specs), 10)
|
kevintaw/django | refs/heads/master | django/utils/archive.py | 562 | """
Based on "python-archive" -- http://pypi.python.org/pypi/python-archive/
Copyright (c) 2010 Gary Wilson Jr. <gary.wilson@gmail.com> and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import shutil
import tarfile
import zipfile
from django.utils import six
class ArchiveException(Exception):
    """
    Base exception class for all archive errors.

    Catch this to handle any failure raised by this module.
    """
class UnrecognizedArchiveFormat(ArchiveException):
    """
    Error raised when passed file is not a recognized archive format,
    i.e. its extension maps to no entry in ``extension_map``.
    """
def extract(path, to_path=''):
    """
    Unpack the tar or zip file at the specified path to the directory
    specified by to_path.
    """
    archive = Archive(path)
    try:
        archive.extract(to_path)
    finally:
        # mirror the context-manager protocol: always release the archive
        archive.close()
class Archive(object):
    """
    The external API class that encapsulates an archive implementation.
    """

    def __init__(self, file):
        self._archive = self._archive_cls(file)(file)

    @staticmethod
    def _archive_cls(file):
        """Pick the implementation class from the file name's extension."""
        if isinstance(file, six.string_types):
            filename = file
        else:
            try:
                filename = file.name
            except AttributeError:
                raise UnrecognizedArchiveFormat(
                    "File object not a recognized archive format.")
        base, tail_ext = os.path.splitext(filename.lower())
        cls = extension_map.get(tail_ext)
        if not cls:
            # handle double extensions such as ".tar.gz" -> ".tar"
            cls = extension_map.get(os.path.splitext(base)[1])
        if not cls:
            raise UnrecognizedArchiveFormat(
                "Path not a recognized archive format: %s" % filename)
        return cls

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def extract(self, to_path=''):
        """Unpack the archive into ``to_path``."""
        self._archive.extract(to_path)

    def list(self):
        """Print the archive's table of contents."""
        self._archive.list()

    def close(self):
        """Release the underlying archive handle."""
        self._archive.close()
class BaseArchive(object):
    """
    Base Archive class. Implementations should inherit this class.
    """

    def split_leading_dir(self, path):
        """Split ``path`` into (first component, remainder of the path)."""
        path = str(path).lstrip('/').lstrip('\\')
        slash_pos = path.find('/')
        backslash_pos = path.find('\\')
        # split on whichever separator occurs first in the string
        if slash_pos != -1 and (backslash_pos == -1 or slash_pos < backslash_pos):
            return path.split('/', 1)
        if backslash_pos != -1:
            return path.split('\\', 1)
        return path, ''

    def has_leading_dir(self, paths):
        """
        Returns true if all the paths have the same leading path name
        (i.e., everything is in one subdirectory in an archive)
        """
        first_prefix = None
        for archived_path in paths:
            prefix, _ = self.split_leading_dir(archived_path)
            if not prefix:
                return False
            if first_prefix is None:
                first_prefix = prefix
            elif prefix != first_prefix:
                return False
        return True

    def extract(self):
        raise NotImplementedError('subclasses of BaseArchive must provide an extract() method')

    def list(self):
        raise NotImplementedError('subclasses of BaseArchive must provide a list() method')
class TarArchive(BaseArchive):

    def __init__(self, file):
        self._archive = tarfile.open(file)

    def list(self, *args, **kwargs):
        self._archive.list(*args, **kwargs)

    def extract(self, to_path):
        """
        Extract all members into ``to_path``, stripping a common leading
        directory when every member shares one.

        Raises ArchiveException for members whose path would escape
        ``to_path`` (e.g. ``../evil`` or absolute names), so a malicious
        archive cannot write outside the target directory.
        """
        # note: python<=2.5 doesn't seem to know about pax headers, filter them
        members = [member for member in self._archive.getmembers()
                   if member.name != 'pax_global_header']
        leading = self.has_leading_dir(x.name for x in members)
        # Resolve the extraction root once; used below to reject traversal.
        base_path = os.path.abspath(to_path)
        for member in members:
            name = member.name
            if leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            target = os.path.abspath(filename)
            if target != base_path and not target.startswith(base_path + os.sep):
                raise ArchiveException(
                    "Archive contains invalid path: '%s'" % member.name)
            if member.isdir():
                if filename and not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                # Bind before the try block so the finally clause cannot hit
                # a NameError when extractfile() itself raises (the original
                # code left `extracted` unbound in that case).
                extracted = None
                try:
                    extracted = self._archive.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    print("In the tar file %s the member %s is invalid: %s" %
                          (name, member.name, exc))
                else:
                    dirname = os.path.dirname(filename)
                    if dirname and not os.path.exists(dirname):
                        os.makedirs(dirname)
                    with open(filename, 'wb') as outfile:
                        shutil.copyfileobj(extracted, outfile)
                finally:
                    if extracted:
                        extracted.close()

    def close(self):
        self._archive.close()
class ZipArchive(BaseArchive):

    def __init__(self, file):
        self._archive = zipfile.ZipFile(file)

    def list(self, *args, **kwargs):
        self._archive.printdir(*args, **kwargs)

    def extract(self, to_path):
        """
        Extract all entries into ``to_path``, stripping a common leading
        directory when every entry shares one.

        Raises ArchiveException for entries whose path would escape
        ``to_path`` ("zip slip": ``../evil`` or absolute names), so a
        malicious archive cannot write outside the target directory.
        """
        namelist = self._archive.namelist()
        leading = self.has_leading_dir(namelist)
        # Resolve the extraction root once; used below to reject traversal.
        base_path = os.path.abspath(to_path)
        for name in namelist:
            data = self._archive.read(name)
            if leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            target = os.path.abspath(filename)
            if target != base_path and not target.startswith(base_path + os.sep):
                raise ArchiveException(
                    "Archive contains invalid path: '%s'" % name)
            dirname = os.path.dirname(filename)
            if dirname and not os.path.exists(dirname):
                os.makedirs(dirname)
            if filename.endswith(('/', '\\')):
                # A directory
                if not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                with open(filename, 'wb') as outfile:
                    outfile.write(data)

    def close(self):
        self._archive.close()
# Maps a lowercased filename extension to the Archive implementation that can
# unpack it. Double extensions such as ".tar.gz" are resolved by the two-step
# splitext() lookup in Archive._archive_cls (first ".gz", then ".tar"); the
# ".tar.bz2"/".tar.gz" keys here appear to be defensive extras.
extension_map = {
    '.tar': TarArchive,
    '.tar.bz2': TarArchive,
    '.tar.gz': TarArchive,
    '.tgz': TarArchive,
    '.tz2': TarArchive,
    '.zip': ZipArchive,
}
|
aehlig/bazel | refs/heads/master | tools/android/aar_resources_extractor.py | 3 | # Lint as: python2, python3
# pylint: disable=g-direct-third-party-import
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tool for extracting resource files from an AAR.
An AAR may contain resources under the /res directory. This tool extracts all
of the resources into a directory. If no resources exist, it creates an
empty.xml file that defines no resources.
In the future, this script may be extended to also extract assets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import zipfile
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from absl import app
from absl import flags
import six
from tools.android import junction
# Command-line interface: --input_aar and --output_res_dir are required;
# --output_assets_dir is optional (assets are only extracted when it is set,
# see main() below).
FLAGS = flags.FLAGS
flags.DEFINE_string("input_aar", None, "Input AAR")
flags.mark_flag_as_required("input_aar")
flags.DEFINE_string("output_res_dir", None, "Output resources directory")
flags.mark_flag_as_required("output_res_dir")
flags.DEFINE_string("output_assets_dir", None, "Output assets directory")
def ExtractResources(aar, output_res_dir):
  """Extracts every res/ file from `aar` into `output_res_dir`.

  If the AAR declares no resources, a res/values/empty.xml that defines no
  resources is written instead.
  """
  dest_abs = os.path.abspath(output_res_dir)
  found_any = False
  for entry in aar.namelist():
    # directory entries end with "/" and are skipped
    if entry.startswith("res/") and not entry.endswith("/"):
      ExtractOneFile(aar, entry, dest_abs)
      found_any = True
  if not found_any:
    placeholder = six.ensure_str(output_res_dir) + "/res/values/empty.xml"
    WriteFileWithJunctions(placeholder, b"<resources/>")
def ExtractAssets(aar, output_assets_dir):
  """Extracts every assets/ file from `aar` into `output_assets_dir`.

  When the AAR ships no assets, an empty placeholder file is written so the
  declared output tree artifact still exists.
  """
  dest_abs = os.path.abspath(output_assets_dir)
  found_any = False
  for entry in aar.namelist():
    # directory entries end with "/" and are skipped
    if entry.startswith("assets/") and not entry.endswith("/"):
      ExtractOneFile(aar, entry, dest_abs)
      found_any = True
  if not found_any:
    # aapt will ignore this file and not print an error message, because it
    # thinks that it is a swap file. We need to create at least one file so
    # that Bazel does not complain that the output tree artifact was not
    # created.
    placeholder = (six.ensure_str(output_assets_dir) +
                   "/assets/empty_asset_generated_by_bazel~")
    WriteFileWithJunctions(placeholder, b"")
def WriteFileWithJunctions(filename, content):
  """Writes file including creating any junctions or directories necessary."""
  if os.name == "nt":
    # Create a junction to the parent directory, because its path might be
    # too long. Creating the junction also creates all parent directories.
    # The write must happen inside the TempJunction scope, otherwise the
    # shortened path would no longer be valid.
    with junction.TempJunction(os.path.dirname(filename)) as junc:
      short_path = os.path.join(junc, os.path.basename(filename))
      with open(short_path, "wb") as out:
        out.write(content)
  else:
    os.makedirs(os.path.dirname(filename))
    with open(filename, "wb") as out:
      out.write(content)
def ExtractOneFile(aar, name, abs_output_dir):
  """Extract one zip entry `name` from `aar` into `abs_output_dir`."""
  if os.name != "nt":
    aar.extract(name, abs_output_dir)
    return
  # Windows: use junctions so extraction works even when the destination
  # path would exceed the MAX_PATH limit.
  fullpath = os.path.normpath(os.path.join(abs_output_dir, name))
  if name.endswith("/"):
    # The zip entry is a directory. Create a junction to it, which also
    # takes care of creating the directory and all of its parents in a
    # longpath-safe manner.
    # We must pretend to have extracted this directory, even if it's
    # empty, therefore we mustn't rely on creating it as a parent
    # directory of a subsequently extracted zip entry (because there may
    # be no such subsequent entry).
    with junction.TempJunction(fullpath.rstrip("/")) as _:
      pass
  else:
    # The zip entry is a file. Create a junction to its parent directory,
    # then open the compressed entry as a file object, so we can extract
    # the data even if the extracted file's path would be too long.
    # The tradeoff is that we lose the permission bits of the compressed
    # file, but Unix permissions don't mean much on Windows anyway.
    with junction.TempJunction(os.path.dirname(fullpath)) as juncpath:
      extracted_path = os.path.join(juncpath, os.path.basename(fullpath))
      with aar.open(name) as src_fd, open(extracted_path, "wb") as dest_fd:
        dest_fd.write(src_fd.read())
def main(unused_argv):
  """Extracts resources (and, when requested, assets) from the flagged AAR."""
  with zipfile.ZipFile(FLAGS.input_aar, "r") as archive:
    ExtractResources(archive, FLAGS.output_res_dir)
    if FLAGS.output_assets_dir is not None:
      ExtractAssets(archive, FLAGS.output_assets_dir)
if __name__ == "__main__":
FLAGS(sys.argv)
app.run(main)
|
thepaul/uftrace | refs/heads/master | tests/t114_replay_trg_time.py | 1 | #!/usr/bin/env python
from runtest import TestBase
import subprocess as sp
TDIR='xxx'
class TestCase(TestBase):
    """Replay with a time filter (-t 1ms) while forcing mem_alloc via -T time=0."""

    def __init__(self):
        TestBase.__init__(self, 'sleep', result="""
# DURATION TID FUNCTION
[16873] | main() {
[16873] | foo() {
[16873] | mem_alloc() {
1.675 us [16873] | malloc();
6.867 us [16873] | } /* mem_alloc */
[16873] | bar() {
2.068 ms [16873] | usleep();
2.071 ms [16873] | } /* bar */
2.085 ms [16873] | } /* foo */
2.086 ms [16873] | } /* main */
""")

    def pre(self):
        """Record the target program into TDIR before replaying."""
        cmd = ' '.join([TestBase.uftrace_cmd, 'record', '-d', TDIR,
                        't-' + self.name])
        sp.call(cmd.split())
        return TestBase.TEST_SUCCESS

    def runcmd(self):
        """Replay with a 1ms threshold, keeping mem_alloc via a time=0 trigger."""
        return ' '.join([TestBase.uftrace_cmd, 'replay', '-t', '1ms',
                         '-T', 'mem_alloc@time=0', '-d', TDIR])

    def post(self, ret):
        """Remove the recorded data directory and pass the result through."""
        sp.call(['rm', '-rf', TDIR])
        return ret
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.