| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be ⌀/null) |
|---|---|---|---|---|
madmax983/h2o-3 | refs/heads/master | h2o-py/tests/testdir_algos/deeplearning/pyunit_DEPRECATED_anomaly_largeDeepLearning.py | 2 |
import sys, os
sys.path.insert(1, os.path.join("..", "..", ".."))
import h2o
from tests import pyunit_utils


def anomaly():
    print "Deep Learning Anomaly Detection MNIST"
    train = h2o.import_file(pyunit_utils.locate("bigdata/laptop/mnist/train.csv.gz"))
    test = h2o.import_file(pyunit_utils.locate("bigdata/laptop/mnist/test.csv.gz"))
    predictors = range(0, 784)
    resp = 784
    # unsupervised -> drop the response column (digit: 0-9)
    train = train[predictors]
    test = test[predictors]
    # 1) LEARN WHAT'S NORMAL
    # train unsupervised Deep Learning autoencoder model on train_hex
    ae_model = h2o.deeplearning(x=train[predictors],
                                autoencoder=True,
                                activation="Tanh",
                                hidden=[2],
                                l1=1e-5,
                                ignore_const_cols=False,
                                epochs=1)
    ae_model.anomaly(test).show()
    # 2) DETECT OUTLIERS
    # anomaly app computes the per-row reconstruction error for the test data set
    # (passing it through the autoencoder model and computing mean square error (MSE) for each row)
    test_rec_error = ae_model.anomaly(test)
    # 3) VISUALIZE OUTLIERS
    # Let's look at the test set points with low/median/high reconstruction errors.
    # We will now visualize the original test set points and their reconstructions obtained
    # by propagating them through the narrow neural net.
    # Convert the test data into its autoencoded representation (pass through narrow neural net)
    test_recon = ae_model.predict(test)
    # In python, the visualization could be done with tools like numpy/matplotlib or numpy/PIL
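    # A minimal sketch of such a plot (an assumption, not part of the original
    # test; requires matplotlib/pandas and assumes as_data_frame() is available):
    #
    #   import matplotlib.pyplot as plt
    #   digit = test_recon[0, :].as_data_frame().values.reshape(28, 28)
    #   plt.imshow(digit, cmap="gray")    # each MNIST row is a 28x28 image
    #   plt.show()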
if __name__ == "__main__":
pyunit_utils.standalone_test(anomaly)
else:
anomaly()
|
zephyrplugins/zephyr | refs/heads/master | zephyr.plugin.jython/jython2.5.2rc3/Lib/test/re_tests.py | 5 |
#!/usr/bin/env python
# -*- mode: python -*-
# $Id: re_tests.py 4185 2008-02-28 16:55:33Z cgroves $
# Re test suite and benchmark suite v1.5
# The 3 possible outcomes for each pattern
[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)
# Benchmark suite (needs expansion)
#
# The benchmark suite does not test correctness, just speed. The
# first element of each tuple is the regex pattern; the second is a
# string to match it against. The benchmarking code will embed the
# second string inside several sizes of padding, to test how regex
# matching performs on large strings.
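# A rough sketch (an assumption, not part of this file) of how such a harness
# might do the embedding -- pad both sides of the target and time a search:
#
#   import re, time
#   def bench(pattern, target, sizes=(100, 1000, 10000)):
#       compiled = re.compile(pattern)
#       for n in sizes:
#           s = '-' * n + target + '-' * n    # embed target in padding
#           start = time.time()
#           compiled.search(s)
#           print pattern, n, time.time() - start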
benchmarks = [
('Python', 'Python'), # Simple text literal
('.*Python', 'Python'), # Bad text literal
('.*Python.*', 'Python'), # Worse text literal
('.*(Python)', 'Python'), # Bad text literal with grouping
('Python|Perl|Tcl', 'Perl'), # Alternation
('(Python|Perl|Tcl)', 'Perl'), # Grouped alternation
('(Python)\\1', 'PythonPython'), # Backreference
('([0a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # Disable the fastmap optimization
('([a-z][a-z0-9]*,)+', 'a5,b7,c9,') # A few sets
]
# Test suite (for verifying correctness)
#
# The test suite is a list of 5- or 3-tuples. The 5 parts of a
# complete tuple are:
# element 0: a string containing the pattern
# 1: the string to match against the pattern
# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
# 3: a string that will be eval()'ed to produce a test string.
# This is an arbitrary Python expression; the available
# variables are "found" (the whole match), and "g1", "g2", ...
# up to "g99" contain the contents of each group, or the
# string 'None' if the group wasn't given a value, or the
# string 'Error' if the group index was out of range;
# also "groups", the return value of m.group() (a tuple).
# 4: The expected result of evaluating the expression.
# If the two don't match, an error is reported.
#
# If the regex isn't expected to work, the latter two elements can be omitted.
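# For illustration (a sketch, not part of the original suite), a 5-tuple such
# as ('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a') would be checked roughly like:
#
#   import re
#   m = re.search('(?P<foo_123>a)', 'a')          # expected outcome: SUCCEED
#   vardict = {'found': m.group(0), 'g1': m.group(1)}
#   assert eval('g1', vardict) == 'a'             # element 3 eval'ed vs element 4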
tests = [
# Test ?P< and ?P= extensions
('(?P<foo_123', '', SYNTAX_ERROR), # Unterminated group identifier
('(?P<1>a)', '', SYNTAX_ERROR), # Begins with a digit
('(?P<!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
('(?P<foo!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
# Same tests, for the ?P= form
('(?P<foo_123>a)(?P=foo_123', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=1)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=!)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=foo_124', 'aa', SYNTAX_ERROR), # Backref to undefined group
('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a'),
('(?P<foo_123>a)(?P=foo_123)', 'aa', SUCCEED, 'g1', 'a'),
# Test octal escapes
('\\1', 'a', SYNTAX_ERROR), # Backreference
('[\\1]', '\1', SUCCEED, 'found', '\1'), # Character
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
#('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# Test \0 is handled everywhere
(r'\0', '\0', SUCCEED, 'found', '\0'),
(r'[\0a]', '\0', SUCCEED, 'found', '\0'),
(r'[a\0]', '\0', SUCCEED, 'found', '\0'),
(r'[^a\0]', '\0', FAIL),
# Test various letter escapes
#(r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
#(r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
(r'\u', '', SYNTAX_ERROR), # A Perl escape
#(r'\c\e\g\h\i\j\k\m\o\p\q\y\z', 'ceghijkmopqyz', SUCCEED, 'found', 'ceghijkmopqyz'),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
#(r'\x00ffffffffffffff', '\377', SUCCEED, 'found', chr(255)),
#(r'\x00f', '\017', SUCCEED, 'found', chr(15)),
#(r'\x00fe', '\376', SUCCEED, 'found', chr(254)),
(r"^\w+=(\\[\000-\277]|[^\n\\])*", "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c",
SUCCEED, 'found', "SRC=eval.c g.c blah blah blah \\\\"),
# Test that . only matches \n in DOTALL mode
('a.b', 'acb', SUCCEED, 'found', 'acb'),
('a.b', 'a\nb', FAIL),
('a.*b', 'acc\nccb', FAIL),
('a.{4,5}b', 'acc\nccb', FAIL),
('a.b', 'a\rb', SUCCEED, 'found', 'a\rb'),
#('a.b(?s)', 'a\nb', SUCCEED, 'found', 'a\nb'),
('a.*(?s)b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.{4,5}b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
(')', '', SYNTAX_ERROR), # Unmatched right bracket
('', '', SUCCEED, 'found', ''), # Empty pattern
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found+"-"', '-'),
('$', 'abc', SUCCEED, 'found+"-"', '-'),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[\\-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-]', 'a-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a\\', '-', SYNTAX_ERROR),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('\\ba\\b', 'a-', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a-', SUCCEED, '"-"', '-'),
('\\by\\b', 'xy', FAIL),
('\\by\\b', 'yz', FAIL),
('\\by\\b', 'xyz', FAIL),
('x\\b', 'xyz', FAIL),
('x\\B', 'xyz', SUCCEED, '"-"', '-'),
('\\Bz', 'xyz', SUCCEED, '"-"', '-'),
('z\\B', 'xyz', FAIL),
('\\Bx', 'xyz', FAIL),
('\\Ba\\B', 'a-', FAIL, '"-"', '-'),
('\\Ba\\B', '-a', FAIL, '"-"', '-'),
('\\Ba\\B', '-a-', FAIL, '"-"', '-'),
('\\By\\B', 'xy', FAIL),
('\\By\\B', 'yz', FAIL),
('\\By\\b', 'xy', SUCCEED, '"-"', '-'),
('\\by\\B', 'yz', SUCCEED, '"-"', '-'),
('\\By\\B', 'xyz', SUCCEED, '"-"', '-'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('$b', 'b', FAIL),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL, 'xg1y', 'xy'),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
('(a+).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'),
('^(a+).\\1$', 'aaaa', FAIL),
('(abc)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('([a-c]+)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(a)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(aa|a)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a|aa)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a+)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('([abc]*)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)(b)c|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'),
('(a)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'),
('([ac])+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'),
('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
('([^N]*N)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 'abNNxyzN-xyzN'),
('([^N]*N)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'),
('([abc]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'),
('([abc]*)x', 'abc', FAIL),
('([xyz]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'),
('(a)+b|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'),
# Test symbolic groups
('(?P<i d>aaa)a', 'aaaa', SYNTAX_ERROR),
('(?P<id>aaa)a', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aaa'),
('(?P<id>aa)(?P=id)', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aa'),
('(?P<id>aa)(?P=xd)', 'aaaa', SYNTAX_ERROR),
# Test octal escapes/memory references
('\\1', 'a', SYNTAX_ERROR),
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
#('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# All tests from Perl
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{0,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab{1,}bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,3}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{3,4}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{4,5}bc', 'abbbbc', FAIL),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found', ''),
('$', 'abc', SUCCEED, 'found', ''),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-a]', '-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('*a', '-', SYNTAX_ERROR),
('(*)b', '-', SYNTAX_ERROR),
('$b', 'b', FAIL),
('a\\', '-', SYNTAX_ERROR),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a{1,}b{1,}c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a**', '-', SYNTAX_ERROR),
('a.+?c', 'abcabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){0,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){1,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
('(a+|b){0,1}', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('([abc])*d', 'abbbcd', SUCCEED, 'found+"-"+g1', 'abbbcd-c'),
('([abc])*bcd', 'abcd', SUCCEED, 'found+"-"+g1', 'abcd-a'),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('((((((((((a))))))))))', 'a', SUCCEED, 'g10', 'a'),
('((((((((((a))))))))))\\10', 'aa', SUCCEED, 'found', 'aa'),
# Python does not have the same rules for \\41 so this is a syntax error
# ('((((((((((a))))))))))\\41', 'aa', FAIL),
# ('((((((((((a))))))))))\\41', 'a!', SUCCEED, 'found', 'a!'),
('((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(?i)((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('(?i)abc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'XBC', FAIL),
('(?i)abc', 'AXC', FAIL),
('(?i)abc', 'ABX', FAIL),
('(?i)abc', 'XABCY', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'ABABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab*?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{0,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab+?bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab+bc', 'ABC', FAIL),
('(?i)ab+bc', 'ABQ', FAIL),
('(?i)ab{1,}bc', 'ABQ', FAIL),
('(?i)ab+bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,3}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{3,4}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{4,5}?bc', 'ABBBBC', FAIL),
('(?i)ab??bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab??bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab??bc', 'ABBBBC', FAIL),
('(?i)ab??c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABCC', FAIL),
('(?i)^abc', 'ABCC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'AABC', FAIL),
('(?i)abc$', 'AABC', SUCCEED, 'found', 'ABC'),
('(?i)^', 'ABC', SUCCEED, 'found', ''),
('(?i)$', 'ABC', SUCCEED, 'found', ''),
('(?i)a.c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)a.c', 'AXC', SUCCEED, 'found', 'AXC'),
('(?i)a.*?c', 'AXYZC', SUCCEED, 'found', 'AXYZC'),
('(?i)a.*c', 'AXYZD', FAIL),
('(?i)a[bc]d', 'ABC', FAIL),
('(?i)a[bc]d', 'ABD', SUCCEED, 'found', 'ABD'),
('(?i)a[b-d]e', 'ABD', FAIL),
('(?i)a[b-d]e', 'ACE', SUCCEED, 'found', 'ACE'),
('(?i)a[b-d]', 'AAC', SUCCEED, 'found', 'AC'),
('(?i)a[-b]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-a]', '-', SYNTAX_ERROR),
('(?i)a[]b', '-', SYNTAX_ERROR),
('(?i)a[', '-', SYNTAX_ERROR),
('(?i)a]', 'A]', SUCCEED, 'found', 'A]'),
('(?i)a[]]b', 'A]B', SUCCEED, 'found', 'A]B'),
('(?i)a[^bc]d', 'AED', SUCCEED, 'found', 'AED'),
('(?i)a[^bc]d', 'ABD', FAIL),
('(?i)a[^-b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)a[^-b]c', 'A-C', FAIL),
('(?i)a[^]b]c', 'A]C', FAIL),
('(?i)a[^]b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)ab|cd', 'ABC', SUCCEED, 'found', 'AB'),
('(?i)ab|cd', 'ABCD', SUCCEED, 'found', 'AB'),
('(?i)()ef', 'DEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)*a', '-', SYNTAX_ERROR),
('(?i)(*)b', '-', SYNTAX_ERROR),
('(?i)$b', 'B', FAIL),
('(?i)a\\', '-', SYNTAX_ERROR),
('(?i)a\\(b', 'A(B', SUCCEED, 'found+"-"+g1', 'A(B-Error'),
('(?i)a\\(*b', 'AB', SUCCEED, 'found', 'AB'),
('(?i)a\\(*b', 'A((B', SUCCEED, 'found', 'A((B'),
('(?i)a\\\\b', 'A\\B', SUCCEED, 'found', 'A\\B'),
('(?i)abc)', '-', SYNTAX_ERROR),
('(?i)(abc', '-', SYNTAX_ERROR),
('(?i)((a))', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'A-A-A'),
('(?i)(a)b(c)', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABC-A-C'),
('(?i)a+b+c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a{1,}b{1,}c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a**', '-', SYNTAX_ERROR),
('(?i)a.+?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.*?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.{0,5}?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)(a+|b)*', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){0,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)+', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){1,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)?', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}?', 'AB', SUCCEED, 'found+"-"+g1', '-None'),
('(?i))(', '-', SYNTAX_ERROR),
('(?i)[^ab]*', 'CDE', SUCCEED, 'found', 'CDE'),
('(?i)abc', '', FAIL),
('(?i)a*', '', SUCCEED, 'found', ''),
('(?i)([abc])*d', 'ABBBCD', SUCCEED, 'found+"-"+g1', 'ABBBCD-C'),
('(?i)([abc])*bcd', 'ABCD', SUCCEED, 'found+"-"+g1', 'ABCD-A'),
('(?i)a|b|c|d|e', 'E', SUCCEED, 'found', 'E'),
('(?i)(a|b|c|d|e)f', 'EF', SUCCEED, 'found+"-"+g1', 'EF-E'),
('(?i)abcd*efg', 'ABCDEFG', SUCCEED, 'found', 'ABCDEFG'),
('(?i)ab*', 'XABYABBBZ', SUCCEED, 'found', 'AB'),
('(?i)ab*', 'XAYABBBZ', SUCCEED, 'found', 'A'),
('(?i)(ab|cd)e', 'ABCDE', SUCCEED, 'found+"-"+g1', 'CDE-CD'),
('(?i)[abhgefdc]ij', 'HIJ', SUCCEED, 'found', 'HIJ'),
('(?i)^(ab|cd)e', 'ABCDE', FAIL),
('(?i)(abc|)ef', 'ABCDEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)(a|b)c*d', 'ABCD', SUCCEED, 'found+"-"+g1', 'BCD-B'),
('(?i)(ab|ab*)bc', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-A'),
('(?i)a([bc]*)c*', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-BC'),
('(?i)a([bc]*)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]+)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]*)(c+d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-B-CD'),
('(?i)a[bcd]*dcdcde', 'ADCDCDE', SUCCEED, 'found', 'ADCDCDE'),
('(?i)a[bcd]+dcdcde', 'ADCDCDE', FAIL),
('(?i)(ab|a)b*c', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-AB'),
('(?i)((a)(b)c)(d)', 'ABCD', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'ABC-A-B-D'),
('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', SUCCEED, 'found', 'ALPHA'),
('(?i)^a(bc+|b[eh])g|.h$', 'ABH', SUCCEED, 'found+"-"+g1', 'BH-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', SUCCEED, 'found+"-"+g1+"-"+g2', 'IJ-IJ-J'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)((((((((((a))))))))))', 'A', SUCCEED, 'g10', 'A'),
('(?i)((((((((((a))))))))))\\10', 'AA', SUCCEED, 'found', 'AA'),
#('(?i)((((((((((a))))))))))\\41', 'AA', FAIL),
#('(?i)((((((((((a))))))))))\\41', 'A!', SUCCEED, 'found', 'A!'),
('(?i)(((((((((a)))))))))', 'A', SUCCEED, 'found', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', SUCCEED, 'g1', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', SUCCEED, 'g1', 'C'),
('(?i)multiple words of text', 'UH-UH', FAIL),
('(?i)multiple words', 'MULTIPLE WORDS, YEAH', SUCCEED, 'found', 'MULTIPLE WORDS'),
('(?i)(.*)c(.*)', 'ABCDE', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCDE-AB-DE'),
('(?i)\\((.*), (.*)\\)', '(A, B)', SUCCEED, 'g2+"-"+g1', 'B-A'),
('(?i)[k]', 'AB', FAIL),
# ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', 'ABCD-$&-\\ABCD'),
# ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', 'BC-$1-\\BC'),
('(?i)a[-]?c', 'AC', SUCCEED, 'found', 'AC'),
('(?i)(abc)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('(?i)([a-c]*)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('a(?!b).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=c|d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?:b|c|d)(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)*(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)+?(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', SUCCEED, 'g1 + g2', 'ce'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
# Comments using the (?#...) syntax
('w(?# comment', 'w', SYNTAX_ERROR),
('w(?# comment 1)xy(?# comment 2)z', 'wxyz', SUCCEED, 'found', 'wxyz'),
# Check odd placement of embedded pattern modifiers
('w(?i)', 'W', SYNTAX_ERROR),
# Comments using the x embedded pattern modifier
("""(?x)w# comment 1
x y
# comment 2
z""", 'wxyz', SUCCEED, 'found', 'wxyz'),
# using the m embedded pattern modifier
#('^abc', """jkl
#abc
#xyz""", FAIL),
('(?m)^abc', """jkl
abc
xyz""", SUCCEED, 'found', 'abc'),
('(?m)abc$', """jkl
xyzabc
123""", SUCCEED, 'found', 'abc'),
# using the s embedded pattern modifier
('a.b', 'a\nb', FAIL),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
# test \w, etc. both inside and outside character classes
('\\w+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('[\\w]+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('\\D+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\D]+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\da-fA-F]+', '123abc', SUCCEED, 'found', '123abc'),
('[\\d-x]', '-', SYNTAX_ERROR),
(r'([\s]*)([\S]*)([\s]*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'(\s*)(\S*)(\s*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
#(r'\x00ff', '\377', SUCCEED, 'found', chr(255)),
#(r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
('\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
#(r'\t\n\v\r\f\a', '\t\n\v\r\f\a', SUCCEED, 'found', chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)),
#(r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', SUCCEED, 'found', '\t\n\v\r\f\b'),
]
|
abhidrona/gn-osc-custom | refs/heads/master | oscar/apps/order/migrations/0006_update_offer_name_field.py | 16 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(DataMigration):

    def forwards(self, orm):
        "Write your forwards methods here."
        # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
        OrderDiscount = orm['order.OrderDiscount']
        for discount in OrderDiscount.objects.all():
            from oscar.apps.offer.models import ConditionalOffer
            try:
                offer = ConditionalOffer.objects.get(id=discount.offer_id)
            except ConditionalOffer.DoesNotExist:
                pass
            else:
                discount.offer_name = offer.name
                discount.save()

    def backwards(self, orm):
        """
        The new offer_name field is redundant information, so we don't
        have to do anything here.
        """
    models = {
'address.country': {
'Meta': {'ordering': "('-is_highlighted', 'name')", 'object_name': 'Country'},
'is_highlighted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1024'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customer.communicationeventtype': {
'Meta': {'object_name': 'CommunicationEventType'},
'category': ('django.db.models.fields.CharField', [], {'default': "u'Order related'", 'max_length': '255'}),
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email_body_html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_body_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_subject_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sms_template': ('django.db.models.fields.CharField', [], {'max_length': '170', 'blank': 'True'})
},
'order.billingaddress': {
'Meta': {'object_name': 'BillingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'order.communicationevent': {
'Meta': {'object_name': 'CommunicationEvent'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customer.CommunicationEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'communication_events'", 'to': "orm['order.Order']"})
},
'order.line': {
'Meta': {'object_name': 'Line'},
'est_dispatch_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_price_before_discounts_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_before_discounts_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': "orm['order.Order']"}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_lines'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['partner.Partner']"}),
'partner_line_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'partner_line_reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'partner_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit_cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_retail_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'order.lineattribute': {
'Meta': {'object_name': 'LineAttribute'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': "orm['order.Line']"}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_attributes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['catalogue.Option']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'order.lineprice': {
'Meta': {'ordering': "('id',)", 'object_name': 'LinePrice'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': "orm['order.Line']"}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_prices'", 'to': "orm['order.Order']"}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'})
},
'order.order': {
'Meta': {'ordering': "['-date_placed']", 'object_name': 'Order'},
'basket_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.BillingAddress']", 'null': 'True', 'blank': 'True'}),
'date_placed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'guest_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingAddress']", 'null': 'True', 'blank': 'True'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'total_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'total_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
},
'order.orderdiscount': {
'Meta': {'object_name': 'OrderDiscount'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'offer_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discounts'", 'to': "orm['order.Order']"}),
'voucher_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'voucher_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'order.ordernote': {
'Meta': {'object_name': 'OrderNote'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'note_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': "orm['order.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(AUTH_USER_MODEL), 'null': 'True'})
},
'order.paymentevent': {
'Meta': {'object_name': 'PaymentEvent'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.PaymentEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['order.Line']", 'through': "orm['order.PaymentEventQuantity']", 'symmetrical': 'False'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_events'", 'to': "orm['order.Order']"})
},
'order.paymenteventquantity': {
'Meta': {'object_name': 'PaymentEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': "orm['order.PaymentEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'order.paymenteventtype': {
'Meta': {'ordering': "('sequence_number',)", 'object_name': 'PaymentEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'sequence_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'order.shippingaddress': {
'Meta': {'object_name': 'ShippingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'order.shippingevent': {
'Meta': {'ordering': "['-date']", 'object_name': 'ShippingEvent'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['order.Line']", 'through': "orm['order.ShippingEventQuantity']", 'symmetrical': 'False'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_events'", 'to': "orm['order.Order']"})
},
'order.shippingeventquantity': {
'Meta': {'object_name': 'ShippingEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': "orm['order.ShippingEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'order.shippingeventtype': {
'Meta': {'ordering': "('sequence_number',)", 'object_name': 'ShippingEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'sequence_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'partner.partner': {
'Meta': {'object_name': 'Partner'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['order']
symmetrical = True
|
socek/Gadu-History
|
refs/heads/master
|
views/userlist.py
|
2
|
#-*- coding: utf-8 -*-
#
#copyright 2010 Dominik "Socek" Długajczyk
#
#This file is part of Gadu History.
#
#Gadu History is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#Gadu History is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Gadu History; if not, write to the Free Software
#Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from lib.user import Users, User
from lib.gui import MenuView, BARS
from views.user import UserView
from lib.gui.text import Text, ROText
from lib.cache import SQL
from lib.files import FileManager
from lib.application import app
import curses
from os import listdir
from lib.gui.locals import encode_string
class UserlistView(MenuView):
class AboutDialog(object):
def __init__(self, y, x ):
self._text = [
u'Program: Gadu History',
u'Autor: Dominik "Socek" Długajczyk',
u'Strona: http://projects.socek.org/gaduhistory/ ',
u'Wersja: %.1f' % app._version,
]
self._x = x
self._y = y
            max_len = 0
            for text in self._text:
                length = len( text )
                if length > max_len:
                    max_len = length
            self._width = max_len + 2
self._height = len( self._text ) + 2
            # curses.newwin takes (nlines, ncols, begin_y, begin_x)
            self._win = curses.newwin( self._height, self._width, self._y, self._x )
def show(self):
self._win.border()
for loop in range( len( self._text ) ):
self._win.addstr( loop + 1, 1, encode_string(self._text[loop]) )
self._win.refresh()
def hide(self):
self._win.clear()
self._win.refresh()
#-------------------------
def __init__(self):
bar = BARS['userlist']
super( UserlistView, self ).__init__(u"Lista użytkowników", bar = bar )
self._about = self.AboutDialog( 5, 5 )
self._old = None
numbers = []
for obj in Users():
text = "%10d: %-35s" % ( obj.ggnumber, obj.show )
self.add_menu_item( text, UserView( obj ) )
numbers.append( obj.ggnumber )
        history_dir = FileManager._history_dir()
        dirs = []
        for number in listdir( history_dir ):
            try:
                number = int( number )
            except ValueError:
                # the entry is not a GG number, so it is not a history directory
                continue
if number in numbers:
continue
dirs.append( number )
dirs.sort()
        for number in dirs:
            text = "%10s: %-35s" % ( number, '' )
            # The original passed the leftover `obj` from the Users() loop
            # above, which opened the wrong user; build a User for this
            # number instead (assuming User tolerates a None row, mirroring
            # the row-based construction in show_number).
            self.add_menu_item( text, UserView( User( None, number ) ) )
self.refresh()
def show_number(self, ggnumber):
if FileManager.has_history( ggnumber ):
sql = SQL()
query = 'select * from users where ggnumber=:ggnumber;'
ret = sql.execute(query, {'ggnumber' : ggnumber })
row = ret.fetchone()
user = User(row, ggnumber)
view = UserView( user )
self.clear()
view()
else:
self.refresh()
            ROText(1, 0, u'Nie znaleziono historii dla podanego numeru.', u'Błąd' ).run()
def additional_char_handler(self):
# 108 - l
# 115 - s
# 109 - m
# 265 - F1
if self._char == 108:
w = Text(1, 0, u'Podaj numer', only_digits = True)
if w.text != None and len( w.text ) > 0:
self.show_number( int( w.text ) )
return False
if self._char == 115:
w = Text(1, 0, u'Podaj nazwę' )
if w.text != None and len( w.text ) > 0:
self.filter_show( w.text.decode( 'UTF-8' ) )
return False
if self._char == 109:
if self._old != None:
self._list = self._old
self._old = None
self.refresh()
return False
if self._char == 265:
self._about.show()
self._char = self._main.getch()
self._about.hide()
return True
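    # filter_show below narrows the visible list to users whose display
    # name contains `name` (case-insensitive); pressing 'm' (handled above)
    # restores the saved, unfiltered list from self._old.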
def filter_show(self, name):
name = name.lower()
self._old = self._list
self._list = []
for obj in self._old:
if obj._fun._user.show.lower().find( name ) != -1:
self._list.append( obj )
self._number = 0
self._show_number = 0
self._up = 0
self.refresh()
|
michaelWagner/oppia
|
refs/heads/develop
|
extensions/value_generators/models/generators_test.py
|
23
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for value generators."""
from core.tests import test_utils
from extensions.value_generators.models import generators
class ValueGeneratorUnitTests(test_utils.GenericTestBase):
"""Test that value generators work correctly."""
def test_copier(self):
generator = generators.Copier()
self.assertEqual(generator.generate_value({}, **{'value': 'a'}), 'a')
self.assertEqual(generator.generate_value(
{}, **{'value': 'a', 'parse_with_jinja': False}), 'a')
self.assertEqual(generator.generate_value(
{}, **{'value': '{{a}}', 'parse_with_jinja': False}), '{{a}}')
self.assertEqual(generator.generate_value(
{'a': 'b'}, **{'value': '{{a}}', 'parse_with_jinja': True}), 'b')
def test_random_selector(self):
generator = generators.RandomSelector()
self.assertIn(generator.generate_value(
{}, **{'list_of_values': ['a', 'b', 'c']}), ['a', 'b', 'c'])
|
insidenothing/3D-Printing-Software
|
refs/heads/master
|
skein_engines/skeinforge-35/fabmetheus_utilities/gcodec.py
|
6
|
"""
Gcodec is a collection of utilities to decode and encode gcode.
To run gcodec, install python 2.x on your machine, which is available from http://www.python.org/download/
Then in the folder which gcodec is in, type 'python' in a shell to run the python interpreter. Finally type 'from gcodec import *' to import this program.
Below is an example of gcodec use. This example is run in a terminal in the folder which contains gcodec and Screw Holder Bottom_export.gcode.
>>> from gcodec import *
>>> getFileText('Screw Holder Bottom_export.gcode')
'G90\nG21\nM103\nM105\nM106\nM110 S60.0\nM111 S30.0\nM108 S210.0\nM104 S235.0\nG1 X0.37 Y-4.07 Z1.9 F60.0\nM101\n
..
many lines of text
..
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
import cStringIO
import math
import os
import sys
import traceback
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GPL 3.0'
def addLineAndNewlineIfNecessary(line, output):
'Add the line and if the line does not end with a newline add a newline.'
output.write(line)
if len(line) < 1:
return
if not line.endswith('\n'):
output.write('\n')
def getArcDistance(relativeLocation, splitLine):
'Get arc distance.'
halfPlaneLineDistance = 0.5 * abs(relativeLocation.dropAxis(2))
radius = getDoubleFromCharacterSplitLine('R', splitLine)
	if radius is None:
iFloat = getDoubleFromCharacterSplitLine('I', splitLine)
jFloat = getDoubleFromCharacterSplitLine('J', splitLine)
radius = abs(complex(iFloat, jFloat))
angle = 0.0
if radius > 0.0:
halfPlaneLineDistanceOverRadius = halfPlaneLineDistance / radius
if halfPlaneLineDistance < radius:
angle = 2.0 * math.asin(halfPlaneLineDistanceOverRadius)
else:
angle = math.pi * halfPlaneLineDistanceOverRadius
return abs(complex(angle * radius, relativeLocation.z))
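# Illustrative geometry for getArcDistance (a sketch of the intent): the XY
# part of relativeLocation is the chord of the arc, so half its length over
# the radius gives sin(angle/2); angle * radius is then the planar arc
# length, combined with the z travel via the complex modulus.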
def getDoubleAfterFirstLetter(word):
'Get the double value of the word after the first letter.'
return float(word[1 :])
def getDoubleForLetter(letter, splitLine):
	'Get the double value of the word after the first occurrence of the letter in the split line.'
return getDoubleAfterFirstLetter(splitLine[getIndexOfStartingWithSecond(letter, splitLine)])
def getDoubleFromCharacterSplitLine(character, splitLine):
	'Get the double value of the string after the first occurrence of the character in the split line.'
indexOfCharacter = getIndexOfStartingWithSecond(character, splitLine)
if indexOfCharacter < 0:
return None
floatString = splitLine[indexOfCharacter][1 :]
try:
return float(floatString)
except ValueError:
return None
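# Example (illustrative): for the line 'G2 X1.0 Y2.0 I0.5 J0.5',
# splitLine == ['G2', 'X1.0', 'Y2.0', 'I0.5', 'J0.5'], so
# getDoubleFromCharacterSplitLine('I', splitLine) returns 0.5 and
# getDoubleFromCharacterSplitLine('R', splitLine) returns None.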
def getDoubleFromCharacterSplitLineValue(character, splitLine, value):
	'Get the double value of the string after the first occurrence of the character in the split line; if it does not exist, return the given value.'
splitLineFloat = getDoubleFromCharacterSplitLine(character, splitLine)
	if splitLineFloat is None:
return value
return splitLineFloat
def getFeedRateMinute(feedRateMinute, splitLine):
'Get the feed rate per minute if the split line has a feed rate.'
indexOfF = getIndexOfStartingWithSecond('F', splitLine)
if indexOfF > 0:
return getDoubleAfterFirstLetter( splitLine[indexOfF] )
return feedRateMinute
def getFirstWord(splitLine):
'Get the first word of a split line.'
if len(splitLine) > 0:
return splitLine[0]
return ''
def getFirstWordFromLine(line):
'Get the first word of a line.'
return getFirstWord(line.split())
def getGcodeFileText(fileName, gcodeText):
	'Get the gcode text from a file if the given gcode text is empty and the file is a gcode file.'
if gcodeText != '':
return gcodeText
if fileName.endswith('.gcode'):
return archive.getFileText(fileName)
return ''
def getIndexOfStartingWithSecond(letter, splitLine):
	'Get the index of the first occurrence of the given letter in the split line, starting with the second word. Return -1 if the letter is not found.'
for wordIndex in xrange( 1, len(splitLine) ):
word = splitLine[ wordIndex ]
firstLetter = word[0]
if firstLetter == letter:
return wordIndex
	return -1
def getLineWithValueString(character, line, splitLine, valueString):
'Get the line with a valueString.'
roundedValueString = character + valueString
indexOfValue = getIndexOfStartingWithSecond(character, splitLine)
	if indexOfValue == -1:
return line + ' ' + roundedValueString
word = splitLine[indexOfValue]
return line.replace(word, roundedValueString)
def getLocationFromSplitLine(oldLocation, splitLine):
'Get the location from the split line.'
	if oldLocation is None:
oldLocation = Vector3()
return Vector3(
getDoubleFromCharacterSplitLineValue('X', splitLine, oldLocation.x),
getDoubleFromCharacterSplitLineValue('Y', splitLine, oldLocation.y),
getDoubleFromCharacterSplitLineValue('Z', splitLine, oldLocation.z))
def getSplitLineBeforeBracketSemicolon(line):
'Get the split line before a bracket or semicolon.'
semicolonIndex = line.find(';')
if semicolonIndex >= 0:
line = line[ : semicolonIndex ]
bracketIndex = line.find('(')
if bracketIndex > 0:
return line[: bracketIndex].split()
return line.split()
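# Example (illustrative): getSplitLineBeforeBracketSemicolon(
# 'G1 X1.0 Y2.0 ; travel move') == ['G1', 'X1.0', 'Y2.0'], and a trailing
# '(comment)' is stripped the same way before splitting.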
def getStringFromCharacterSplitLine(character, splitLine):
'Get the string after the first occurence of the character in the split line.'
indexOfCharacter = getIndexOfStartingWithSecond(character, splitLine)
if indexOfCharacter < 0:
return None
return splitLine[indexOfCharacter][1 :]
def getWithoutBracketsEqualTab(line):
'Get a string without the greater than sign, the bracket and less than sign, the equal sign or the tab.'
line = line.replace('=', ' ')
line = line.replace('(<', '')
line = line.replace('>', '')
return line.replace('\t', '')
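# Example (illustrative): settings lines are tagged like
# '(<decimalPlacesCarried> 3', and getWithoutBracketsEqualTab(
# '(<decimalPlacesCarried>') == 'decimalPlacesCarried', which is how
# parseSplitLine further below recognises setting names.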
def isProcedureDone(gcodeText, procedure):
'Determine if the procedure has been done on the gcode text.'
if gcodeText == '':
return False
lines = archive.getTextLines(gcodeText)
for line in lines:
withoutBracketsEqualTabQuotes = getWithoutBracketsEqualTab(line).replace('"', '').replace("'", '')
splitLine = getWithoutBracketsEqualTab( withoutBracketsEqualTabQuotes ).split()
firstWord = getFirstWord(splitLine)
if firstWord == 'procedureDone':
if splitLine[1].find(procedure) != -1:
return True
elif firstWord == 'extrusionStart':
return False
procedureIndex = line.find(procedure)
if procedureIndex != -1:
if 'procedureDone' in splitLine:
nextIndex = splitLine.index('procedureDone') + 1
if nextIndex < len(splitLine):
nextWordSplit = splitLine[nextIndex].split(',')
if procedure in nextWordSplit:
return True
return False
def isProcedureDoneOrFileIsEmpty(gcodeText, procedure):
'Determine if the procedure has been done on the gcode text or the file is empty.'
if gcodeText == '':
return True
return isProcedureDone(gcodeText, procedure)
def isThereAFirstWord(firstWord, lines, startIndex):
'Parse gcode until the first word if there is one.'
for lineIndex in xrange(startIndex, len(lines)):
line = lines[lineIndex]
splitLine = getSplitLineBeforeBracketSemicolon(line)
if firstWord == getFirstWord(splitLine):
return True
return False
class BoundingRectangle:
'A class to get the corners of a gcode text.'
def getFromGcodeLines(self, lines, radius):
'Parse gcode text and get the minimum and maximum corners.'
self.cornerMaximum = complex(-999999999.0, -999999999.0)
self.cornerMinimum = complex(999999999.0, 999999999.0)
self.oldLocation = None
self.cornerRadius = complex(radius, radius)
for line in lines:
self.parseCorner(line)
return self
def isPointInside(self, point):
'Determine if the point is inside the bounding rectangle.'
return point.imag >= self.cornerMinimum.imag and point.imag <= self.cornerMaximum.imag and point.real >= self.cornerMinimum.real and point.real <= self.cornerMaximum.real
def parseCorner(self, line):
'Parse a gcode line and use the location to update the bounding corners.'
splitLine = getSplitLineBeforeBracketSemicolon(line)
firstWord = getFirstWord(splitLine)
if firstWord == '(<boundaryPoint>':
locationComplex = getLocationFromSplitLine(None, splitLine).dropAxis(2)
self.cornerMaximum = euclidean.getMaximum(self.cornerMaximum, locationComplex)
self.cornerMinimum = euclidean.getMinimum(self.cornerMinimum, locationComplex)
elif firstWord == 'G1':
location = getLocationFromSplitLine(self.oldLocation, splitLine)
locationComplex = location.dropAxis(2)
self.cornerMaximum = euclidean.getMaximum(self.cornerMaximum, locationComplex + self.cornerRadius)
self.cornerMinimum = euclidean.getMinimum(self.cornerMinimum, locationComplex - self.cornerRadius)
self.oldLocation = location
class DistanceFeedRate:
'A class to limit the z feed rate and round values.'
def __init__(self):
'Initialize.'
self.decimalPlacesCarried = 3
self.output = cStringIO.StringIO()
def addGcodeFromFeedRateThreadZ(self, feedRateMinute, thread, z):
'Add a thread to the output.'
if len(thread) > 0:
self.addGcodeMovementZWithFeedRate(feedRateMinute, thread[0], z)
else:
print('zero length vertex positions array which was skipped over, this should never happen.')
if len(thread) < 2:
print('thread of only one point in addGcodeFromFeedRateThreadZ in gcodec, this should never happen.')
print(thread)
return
self.addLine('M101') # Turn extruder on.
for point in thread[1 :]:
self.addGcodeMovementZWithFeedRate(feedRateMinute, point, z)
self.addLine('M103') # Turn extruder off.
def addGcodeFromLoop(self, loop, z):
'Add the gcode loop.'
euclidean.addSurroundingLoopBeginning(self, loop, z)
self.addPerimeterBlock(loop, z)
self.addLine('(</boundaryPerimeter>)')
self.addLine('(</surroundingLoop>)')
def addGcodeFromThreadZ(self, thread, z):
'Add a thread to the output.'
if len(thread) > 0:
self.addGcodeMovementZ(thread[0], z)
else:
print('zero length vertex positions array which was skipped over, this should never happen.')
if len(thread) < 2:
print('thread of only one point in addGcodeFromThreadZ in gcodec, this should never happen.')
print(thread)
return
self.addLine('M101') # Turn extruder on.
for point in thread[1 :]:
self.addGcodeMovementZ(point, z)
self.addLine('M103') # Turn extruder off.
def addGcodeMovementZ(self, point, z):
'Add a movement to the output.'
self.addLine(self.getLinearGcodeMovement(point, z))
def addGcodeMovementZWithFeedRate(self, feedRateMinute, point, z):
'Add a movement to the output.'
self.addLine(self.getLinearGcodeMovementWithFeedRate(feedRateMinute, point, z))
def addLine(self, line):
'Add a line of text and a newline to the output.'
if len(line) > 0:
self.output.write(line + '\n')
def addLines(self, lines):
'Add lines of text to the output.'
for line in lines:
self.addLine(line)
def addLinesSetAbsoluteDistanceMode(self, lines):
'Add lines of text to the output and ensure the absolute mode is set.'
if len(lines) < 1:
return
if len(lines[0]) < 1:
return
absoluteDistanceMode = True
self.addLine('(<alteration>)')
for line in lines:
splitLine = line.split()
firstWord = getFirstWord(splitLine)
if firstWord == 'G90':
absoluteDistanceMode = True
elif firstWord == 'G91':
absoluteDistanceMode = False
self.addLine(line)
if not absoluteDistanceMode:
self.addLine('G90')
self.addLine('(</alteration>)')
def addParameter(self, firstWord, parameter):
'Add the parameter.'
self.addLine(firstWord + ' S' + euclidean.getRoundedToThreePlaces(parameter))
def addPerimeterBlock(self, loop, z):
'Add the perimeter gcode block for the loop.'
if len(loop) < 2:
return
if euclidean.isWiddershins(loop): # Indicate that a perimeter is beginning.
self.addLine('(<perimeter> outer )')
else:
self.addLine('(<perimeter> inner )')
self.addGcodeFromThreadZ(loop + [loop[0]], z)
		self.addLine('(</perimeter>)') # Indicate that the perimeter is ending.
def addTagBracketedLine(self, tagName, value):
		'Add a begin tag, value and end tag.'
self.addLine('(<%s> %s </%s>)' % (tagName, value, tagName))
def getBoundaryLine(self, location):
'Get boundary gcode line.'
return '(<boundaryPoint> X%s Y%s Z%s </boundaryPoint>)' % (self.getRounded(location.x), self.getRounded(location.y), self.getRounded(location.z))
def getFirstWordMovement(self, firstWord, location):
'Get the start of the arc line.'
return '%s X%s Y%s Z%s' % (firstWord, self.getRounded(location.x), self.getRounded(location.y), self.getRounded(location.z))
def getLinearGcodeMovement(self, point, z):
'Get a linear gcode movement.'
return 'G1 X%s Y%s Z%s' % ( self.getRounded( point.real ), self.getRounded( point.imag ), self.getRounded(z) )
def getLinearGcodeMovementWithFeedRate(self, feedRateMinute, point, z):
'Get a z limited gcode movement.'
linearGcodeMovement = self.getLinearGcodeMovement(point, z)
		if feedRateMinute is None:
return linearGcodeMovement
return linearGcodeMovement + ' F' + self.getRounded(feedRateMinute)
def getLineWithFeedRate(self, feedRateMinute, line, splitLine):
'Get the line with a feed rate.'
return getLineWithValueString('F', line, splitLine, self.getRounded(feedRateMinute))
def getLineWithX(self, line, splitLine, x):
'Get the line with an x.'
return getLineWithValueString('X', line, splitLine, self.getRounded(x))
def getLineWithY(self, line, splitLine, y):
'Get the line with a y.'
return getLineWithValueString('Y', line, splitLine, self.getRounded(y))
def getLineWithZ(self, line, splitLine, z):
'Get the line with a z.'
return getLineWithValueString('Z', line, splitLine, self.getRounded(z))
def getRounded(self, number):
'Get number rounded to the number of carried decimal places as a string.'
return euclidean.getRoundedToPlacesString(self.decimalPlacesCarried, number)
def parseSplitLine(self, firstWord, splitLine):
'Parse gcode split line and store the parameters.'
firstWord = getWithoutBracketsEqualTab(firstWord)
if firstWord == 'decimalPlacesCarried':
self.decimalPlacesCarried = int(splitLine[1])
|
killerstorm/ngcccbase
|
refs/heads/master
|
coloredcoinlib/blockchain.py
|
4
|
"""
Data structures to model bitcoin blockchain objects.
"""
import bitcoin.core
import bitcoin.core.serialize
import bitcoin.rpc
from toposort import toposorted
def script_to_raw_address(script):
# extract the destination address from the scriptPubkey
if script[:3] == "\x76\xa9\x14":
return script[3:23]
else:
return None
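# Illustrative note (an assumption about the intent): "\x76\xa9\x14" is the
# standard pay-to-pubkey-hash prefix OP_DUP OP_HASH160 <push 20 bytes>, so
# script[3:23] is the 20-byte HASH160 of the destination key; any other
# script layout (P2SH, multisig, OP_RETURN, ...) yields None here.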
class COutpoint(object):
def __init__(self, hash, n):
self.hash = hash
self.n = n
class CTxIn(object):
def __init__(self, op_hash, op_n):
self.prevout = COutpoint(op_hash, op_n)
self.nSequence = None
def get_txhash(self):
if self.prevout.hash == 'coinbase':
return self.prevout.hash
else:
return self.prevout.hash.decode('hex')[::-1]
def get_outpoint(self):
return (self.prevout.hash, self.prevout.n)
def set_nSequence(self, nSequence):
self.nSequence = nSequence
class CTxOut(object):
def __init__(self, value, script):
self.value = value
self.script = script
self.raw_address = script_to_raw_address(script)
class CTransaction(object):
def __init__(self, bs):
self.bs = bs
self.have_input_values = False
def get_fee(self):
self.ensure_input_values()
input_value = sum([txin.value for txin in self.inputs])
output_value = sum([txout.value for txout in self.outputs])
return input_value - output_value
@classmethod
def from_bitcoincore(klass, txhash, bctx, bs):
tx = CTransaction(bs)
tx.raw = bctx
tx.hash = txhash
tx.inputs = []
for i in bctx.vin:
if i.prevout.is_null():
tx.inputs.append(CTxIn('coinbase', 0))
else:
op = i.prevout
tx.inputs.append(CTxIn(bitcoin.core.b2lx(op.hash),
op.n))
tx.outputs = []
for o in bctx.vout:
tx.outputs.append(CTxOut(o.nValue, o.scriptPubKey))
return tx
def ensure_input_values(self):
if self.have_input_values:
return
for inp in self.inputs:
prev_tx_hash = inp.prevout.hash
if prev_tx_hash != 'coinbase':
prevtx = self.bs.get_tx(prev_tx_hash)
inp.prevtx = prevtx
inp.value = prevtx.outputs[inp.prevout.n].value
else:
inp.value = 0 # TODO: value of coinbase tx?
self.have_input_values = True
class BlockchainStateBase(object):
def sort_txs(self, tx_list):
block_txs = {h:self.get_tx(h) for h in tx_list}
def get_dependent_txs(tx):
"""all transactions from current block this transaction
directly depends on"""
dependent_txs = []
for inp in tx.inputs:
if inp.prevout.hash in block_txs:
dependent_txs.append(block_txs[inp.prevout.hash])
return dependent_txs
return toposorted(block_txs.values(), get_dependent_txs)
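    # Illustrative example (hypothetical transactions): with
    # get_dependent_txs returning the in-block transactions a tx spends
    # from, toposorted is expected to yield funding transactions before
    # their spenders, e.g. if B spends an output of A, A is ordered first.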
class BlockchainState(BlockchainStateBase):
""" Represents a blockchain state, using bitcoin-RPC to
obtain information of transactions, addresses, and blocks. """
def __init__(self, bitcoind):
self.bitcoind = bitcoind
def publish_tx(self, txdata):
return self.bitcoind.sendrawtransaction(txdata)
@classmethod
def from_url(cls, url, testnet=False):
if testnet:
bitcoind = bitcoin.rpc.RawProxy(
service_url=url, service_port=18332)
else:
bitcoind = bitcoin.rpc.RawProxy( # pragma: no cover
service_url=url) # pragma: no cover
return cls(bitcoind)
def get_block_height(self, blockhash):
block = self.bitcoind.getblock(blockhash)
return block['height']
def get_block_count(self):
return self.bitcoind.getblockcount()
def get_block_hash(self, index):
return self.bitcoind.getblockhash(index)
def get_block(self, blockhash):
return self.bitcoind.getblock(blockhash)
def get_blockhash_at_height(self, height):
return self.bitcoind.getblockhash(height)
def get_previous_blockinfo(self, blockhash):
block_data = self.bitcoind.getblock(blockhash)
return block_data['previousblockhash'], block_data['height']
def get_tx_blockhash(self, txhash):
try:
raw = self.bitcoind.getrawtransaction(txhash, 1)
except Exception, e:
# print txhash, e
return None, False
return raw.get('blockhash', None), True
def get_raw(self, txhash):
return self.bitcoind.getrawtransaction(txhash, 0)
def get_tx(self, txhash):
txhex = self.bitcoind.getrawtransaction(txhash, 0)
txbin = bitcoin.core.x(txhex)
tx = bitcoin.core.CTransaction.deserialize(txbin)
return CTransaction.from_bitcoincore(txhash, tx, self)
def get_best_blockhash(self):
try:
            return self.bitcoind.getbestblockhash()  # was `self.bitcoin`, an AttributeError typo
        except Exception:
# warning: not atomic!
# remove once bitcoin 0.9 becomes commonplace
count = self.bitcoind.getblockcount()
return self.bitcoind.getblockhash(count)
def iter_block_txs(self, blockhash):
block_hex = None
try:
block_hex = self.bitcoind.getblock(blockhash, False)
except bitcoin.rpc.JSONRPCException:
pass
if block_hex:
# block at once
block = bitcoin.core.CBlock.deserialize(bitcoin.core.x(block_hex))
block_hex = None
for tx in block.vtx:
txhash = bitcoin.core.b2lx(
bitcoin.core.serialize.Hash(tx.serialize()))
yield CTransaction.from_bitcoincore(txhash, tx, self)
else:
txhashes = self.bitcoind.getblock(blockhash)['tx']
for txhash in txhashes:
yield self.get_tx(txhash)
    # sort_txs is inherited from BlockchainStateBase (the definition here
    # was a verbatim duplicate of the base-class implementation).
def get_mempool_txs(self):
return self.sort_txs(self.bitcoind.getrawmempool())
|
IvantheDugtrio/Platypus
|
refs/heads/master
|
scripts/filterTR.py
|
2
|
import sys
threshold = int(sys.argv[1])
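# Illustrative usage (assumed invocation): filter a Platypus VCF on stdin,
# keeping header lines plus bi-allelic records whose INFO TR value (reads
# supporting the variant) meets the threshold, e.g.
#   python filterTR.py 10 < calls.vcf > filtered.vcf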
for line in sys.stdin:
if line[0] == "#":
print line.strip()
continue
cols = line.strip().split("\t")
info = cols[7]
if "," in cols[4]:
continue
for infoVal in info.split(";"):
        parts = infoVal.split("=")
        if len(parts) < 2:
            # INFO flags have no "=value" part and cannot carry a TR count
            continue
        name, value = parts[0], parts[1]
if name == "TR" and int(value) >= threshold:
print line.strip()
|
grahamBamber/coffie
|
refs/heads/master
|
lcd/lcd_perf.py
|
3
|
#!/usr/bin/env python
import time
from lcd import lcddriver
from i2clibraries import i2c_lcd_smbus
lcd1 = lcddriver.lcd()
lcd2 = i2c_lcd_smbus.i2c_lcd(0x3f,1, 2, 1, 0, 4, 5, 6, 7, 3)
lcd2.command(lcd2.CMD_Display_Control | lcd2.OPT_Enable_Display)
lcd2.backLightOn()
def writeLcd1(message):
lcd1.lcd_display_string(str(message), (message % 2) + 1)
def writeLcd2(message):
lcd2.setPosition((message % 2) + 1 , 0)
lcd2.writeString(str(message))
if __name__=='__main__':
for count in [10**2, 10**3]:
_start = time.time()
for ii in xrange(0, count):
writeLcd1(ii)
print "Sending %s numbers to LCD1 took %s seconds" % (count, (time.time() - _start))
for count in [10**2, 10**3]:
_start = time.time()
for ii in xrange(0, count):
writeLcd2(ii)
print "Sending %s numbers to LCD2 took %s seconds" % (count, (time.time() - _start))
|
getavalon/core
|
refs/heads/master
|
avalon/maya/lib.py
|
2
|
"""Standalone helper functions"""
import contextlib
from maya import cmds, mel
from maya.api import OpenMaya as om
def unique_name(name, format="%02d", namespace="", prefix="", suffix=""):
"""Return unique `name`
The function takes into consideration an optional `namespace`
and `suffix`. The suffix is included in evaluating whether a
name exists - such as `name` + "_GRP" - but isn't included
in the returned value.
If a namespace is provided, only names within that namespace
are considered when evaluating whether the name is unique.
Arguments:
format (str, optional): The `name` is given a number, this determines
how this number is formatted. Defaults to a padding of 2.
E.g. my_name01, my_name02.
namespace (str, optional): Only consider names within this namespace.
suffix (str, optional): Only consider names with this suffix.
Example:
>>> name = cmds.createNode("transform", name="MyName")
>>> cmds.objExists(name)
True
>>> unique = unique_name(name)
>>> cmds.objExists(unique)
False
"""
iteration = 1
unique = prefix + (name + format % iteration) + suffix
while cmds.objExists(namespace + ":" + unique):
iteration += 1
unique = prefix + (name + format % iteration) + suffix
if suffix:
return unique[:-len(suffix)]
return unique
def unique_namespace(namespace, format="%02d", prefix="", suffix=""):
"""Return unique namespace
Similar to :func:`unique_name` but evaluating namespaces
as opposed to object names.
Arguments:
namespace (str): Name of namespace to consider
format (str, optional): Formatting of the given iteration number
suffix (str, optional): Only consider namespaces with this suffix.
"""
iteration = 1
unique = prefix + (namespace + format % iteration) + suffix
# The `existing` set does not just contain the namespaces but *all* nodes
# within "current namespace". We need all because the namespace could
# also clash with a node name. To be truly unique and valid one needs to
# check against all.
existing = set(cmds.namespaceInfo(listNamespace=True))
while unique in existing:
iteration += 1
unique = prefix + (namespace + format % iteration) + suffix
return unique
def read(node):
"""Return user-defined attributes from `node`"""
data = dict()
for attr in cmds.listAttr(node, userDefined=True) or list():
try:
value = cmds.getAttr(node + "." + attr, asString=True)
except RuntimeError:
# For Message type attribute or others that have connections,
# take source node name as value.
source = cmds.listConnections(node + "." + attr,
source=True,
destination=False)
source = cmds.ls(source, long=True) or [None]
value = source[0]
except ValueError:
# Some attributes cannot be read directly,
# such as mesh and color attributes. These
# are considered non-essential to this
# particular publishing pipeline.
value = None
data[attr] = value
return data
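# Illustrative usage of read() (hypothetical node and attribute names):
#   >>> cmds.addAttr("pCube1", longName="myString", dataType="string")
#   >>> cmds.setAttr("pCube1.myString", "hello", type="string")
#   >>> read("pCube1")   # returns roughly {'myString': 'hello'}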
def export_alembic(nodes,
file,
frame_range=None,
write_uv=True,
write_visibility=True,
attribute_prefix=None):
"""Wrap native MEL command with limited set of arguments
Arguments:
nodes (list): Long names of nodes to cache
file (str): Absolute path to output destination
frame_range (tuple, optional): Start- and end-frame of cache,
default to current animation range.
write_uv (bool, optional): Whether or not to include UVs,
default to True
write_visibility (bool, optional): Turn on to store the visibility
state of objects in the Alembic file. Otherwise, all objects are
considered visible, default to True
attribute_prefix (str, optional): Include all user-defined
attributes with this prefix.
"""
if frame_range is None:
frame_range = (
cmds.playbackOptions(query=True, ast=True),
cmds.playbackOptions(query=True, aet=True)
)
options = [
("file", file),
("frameRange", "%s %s" % frame_range),
] + [("root", mesh) for mesh in nodes]
if isinstance(attribute_prefix, basestring):
# Include all attributes prefixed with "mb"
# TODO(marcus): This would be a good candidate for
# external registration, so that the developer
# doesn't have to edit this function to modify
# the behavior of Alembic export.
options.append(("attrPrefix", str(attribute_prefix)))
if write_uv:
options.append(("uvWrite", ""))
if write_visibility:
options.append(("writeVisibility", ""))
# Generate MEL command
mel_args = list()
for key, value in options:
mel_args.append("-{0} {1}".format(key, value))
mel_args_string = " ".join(mel_args)
mel_cmd = "AbcExport -j \"{0}\"".format(mel_args_string)
# For debuggability, put the string passed to MEL in the Script editor.
print("mel.eval('%s')" % mel_cmd)
return mel.eval(mel_cmd)
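# For example (illustrative paths/nodes), export_alembic(["|pCube1"],
# "/tmp/out.abc", frame_range=(1, 24)) builds and evaluates roughly:
#   AbcExport -j "-file /tmp/out.abc -frameRange 1 24 -root |pCube1 -uvWrite  -writeVisibility "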
@contextlib.contextmanager
def undo_chunk():
"""Open a undo chunk during context."""
try:
cmds.undoInfo(openChunk=True)
yield
finally:
cmds.undoInfo(closeChunk=True)
def imprint(node, data):
"""Write `data` to `node` as userDefined attributes
Arguments:
node (str): Long name of node
data (dict): Dictionary of key/value pairs
Example:
>>> from maya import cmds
>>> def compute():
... return 6
...
>>> cube, generator = cmds.polyCube()
>>> imprint(cube, {
... "regularString": "myFamily",
... "computedValue": lambda: compute()
... })
...
>>> cmds.getAttr(cube + ".computedValue")
6
"""
for key, value in data.items():
if callable(value):
# Support values evaluated at imprint
value = value()
if isinstance(value, bool):
add_type = {"attributeType": "bool"}
set_type = {"keyable": False, "channelBox": True}
elif isinstance(value, basestring):
add_type = {"dataType": "string"}
set_type = {"type": "string"}
elif isinstance(value, int):
add_type = {"attributeType": "long"}
set_type = {"keyable": False, "channelBox": True}
elif isinstance(value, float):
add_type = {"attributeType": "double"}
set_type = {"keyable": False, "channelBox": True}
elif isinstance(value, (list, tuple)):
add_type = {"attributeType": "enum", "enumName": ":".join(value)}
set_type = {"keyable": False, "channelBox": True}
value = 0 # enum default
else:
raise TypeError("Unsupported type: %r" % type(value))
cmds.addAttr(node, longName=key, **add_type)
cmds.setAttr(node + "." + key, value, **set_type)
@contextlib.contextmanager
def without_extension():
"""Use cmds.file with defaultExtensions=False"""
previous_setting = cmds.file(defaultExtensions=True, query=True)
try:
cmds.file(defaultExtensions=False)
yield
finally:
cmds.file(defaultExtensions=previous_setting)
@contextlib.contextmanager
def maintained_selection():
"""Maintain selection during context
Example:
>>> scene = cmds.file(new=True, force=True)
>>> node = cmds.createNode("transform", name="Test")
>>> cmds.select("persp")
>>> with maintained_selection():
... cmds.select("Test", replace=True)
>>> "Test" in cmds.ls(selection=True)
False
"""
previous_selection = cmds.ls(selection=True)
try:
yield
finally:
if previous_selection:
cmds.select(previous_selection,
replace=True,
noExpand=True)
else:
cmds.select(clear=True)
@contextlib.contextmanager
def suspended_refresh():
"""Suspend viewport refreshes"""
try:
cmds.refresh(suspend=True)
yield
finally:
cmds.refresh(suspend=False)
def serialise_shaders(nodes):
"""Generate a shader set dictionary
Arguments:
nodes (list): Absolute paths to nodes
Returns:
dictionary of (shader: id) pairs
Schema:
{
"shader1": ["id1", "id2"],
"shader2": ["id3", "id1"]
}
Example:
{
"Bazooka_Brothers01_:blinn4SG": [
"f9520572-ac1d-11e6-b39e-3085a99791c9.f[4922:5001]",
"f9520572-ac1d-11e6-b39e-3085a99791c9.f[4587:4634]",
"f9520572-ac1d-11e6-b39e-3085a99791c9.f[1120:1567]",
"f9520572-ac1d-11e6-b39e-3085a99791c9.f[4251:4362]"
],
"lambert2SG": [
"f9520571-ac1d-11e6-9dbb-3085a99791c9"
]
}
"""
valid_nodes = cmds.ls(
nodes,
long=True,
recursive=True,
showType=True,
objectsOnly=True,
type="transform"
)
meshes_by_id = {}
for mesh in valid_nodes:
        shapes = cmds.listRelatives(mesh,  # was valid_nodes[0]; a bug that ignored the loop variable
                                    shapes=True,
                                    fullPath=True) or list()
if shapes:
shape = shapes[0]
if not cmds.nodeType(shape):
continue
try:
id_ = cmds.getAttr(mesh + ".mbID")
if id_ not in meshes_by_id:
meshes_by_id[id_] = list()
meshes_by_id[id_].append(mesh)
except ValueError:
continue
meshes_by_shader = dict()
for id_, mesh in meshes_by_id.items():
shape = cmds.listRelatives(mesh,
shapes=True,
fullPath=True) or list()
for shader in cmds.listConnections(shape,
type="shadingEngine") or list():
# Objects in this group are those that haven't got
# any shaders. These are expected to be managed
# elsewhere, such as by the default model loader.
if shader == "initialShadingGroup":
continue
if shader not in meshes_by_shader:
meshes_by_shader[shader] = list()
shaded = cmds.sets(shader, query=True) or list()
meshes_by_shader[shader].extend(shaded)
shader_by_id = {}
for shader, shaded in meshes_by_shader.items():
if shader not in shader_by_id:
shader_by_id[shader] = list()
for mesh in shaded:
# Enable shader assignment to faces.
name = mesh.split(".f[")[0]
transform = name
if cmds.objectType(transform) == "mesh":
transform = cmds.listRelatives(name, parent=True)[0]
try:
id_ = cmds.getAttr(transform + ".mbID")
shader_by_id[shader].append(mesh.replace(name, id_))
            except ValueError:  # cmds.getAttr raises ValueError when .mbID is missing
continue
# Remove duplicates
shader_by_id[shader] = list(set(shader_by_id[shader]))
return shader_by_id
def apply_shaders(relationships, namespace=None):
"""Given a dictionary of `relationships`, apply shaders to meshes
Arguments:
relationships (avalon-core:shaders-1.0): A dictionary of
shaders and how they relate to meshes.
"""
if namespace is not None:
# Append namespace to shader group identifier.
# E.g. `blinn1SG` -> `Bruce_:blinn1SG`
relationships = {
"%s:%s" % (namespace, shader): relationships[shader]
for shader in relationships
}
for shader, ids in relationships.items():
print("Looking for '%s'.." % shader)
shader = next(iter(cmds.ls(shader)), None)
assert shader, "Associated shader not part of asset, this is a bug"
for id_ in ids:
mesh, faces = (id_.rsplit(".", 1) + [""])[:2]
# Find all meshes matching this particular ID
# Convert IDs to mesh + id, e.g. "nameOfNode.f[1:100]"
meshes = list(".".join([mesh, faces])
for mesh in lsattr("mbID", value=mesh))
if not meshes:
continue
print("Assigning '%s' to '%s'" % (shader, ", ".join(meshes)))
cmds.sets(meshes, forceElement=shader)
def lsattr(attr, value=None):
"""Return nodes matching `key` and `value`
Arguments:
attr (str): Name of Maya attribute
value (object, optional): Value of attribute. If none
is provided, return all nodes with this attribute.
Example:
>> lsattr("id", "myId")
["myNode"]
>> lsattr("id")
["myNode", "myOtherNode"]
"""
if value is None:
return cmds.ls("*.%s" % attr,
recursive=True,
objectsOnly=True,
long=True)
return lsattrs({attr: value})
def lsattrs(attrs):
"""Return nodes with the given attribute(s).
Arguments:
attrs (dict): Name and value pairs of expected matches
Example:
    >> # Return nodes with an `age` of five.
    >> lsattrs({"age": "five"})
    >> # Return nodes with both `age` and `color` of five and blue.
    >> lsattrs({"age": "five", "color": "blue"})
Return:
list: matching nodes.
"""
dep_fn = om.MFnDependencyNode()
dag_fn = om.MFnDagNode()
selection_list = om.MSelectionList()
first_attr = attrs.iterkeys().next()
try:
selection_list.add("*.{0}".format(first_attr),
searchChildNamespaces=True)
except RuntimeError as exc:
if str(exc).endswith("Object does not exist"):
return []
matches = set()
for i in range(selection_list.length()):
node = selection_list.getDependNode(i)
if node.hasFn(om.MFn.kDagNode):
fn_node = dag_fn.setObject(node)
full_path_names = [path.fullPathName()
for path in fn_node.getAllPaths()]
else:
fn_node = dep_fn.setObject(node)
full_path_names = [fn_node.name()]
for attr in attrs:
try:
plug = fn_node.findPlug(attr, True)
if plug.asString() != attrs[attr]:
break
except RuntimeError:
break
else:
matches.update(full_path_names)
return list(matches)
|
gef756/statsmodels
|
refs/heads/master
|
statsmodels/sandbox/regression/treewalkerclass.py
|
34
|
'''
Formulas
--------
This follows mostly Greene notation (in slides)
partially ignoring factors tau or mu for now, ADDED
(if all tau==1, then runmnl==clogit)
leaf k probability :
  Prob(k|j) = exp(b_k * X_k / mu_j) / sum_{i in L(j)} exp(b_i * X_i / mu_j)
branch j probabilities :
  Prob(j) = exp(b_j * X_j + mu_j * IV_j) / sum_{i in NB(j)} exp(b_i * X_i + mu_i * IV_i)
inclusive value of branch j :
  IV_j = log( sum_{i in L(j)} exp(b_i * X_i / mu_j) )
this is the log of the denominator of the leaf probabilities
L(j) : leaves at branch j, where k is child of j
NB(j) : set of j and its siblings
Design
------
* splitting calculation transmission between returns and changes to
instance.probs
- probability for each leaf is in instance.probs
- inclusive values and contribution of exog on branch level need to be
added separately. handed up the tree through returns
* question: should params array be accessed directly through
`self.recursionparams[self.parinddict[name]]` or should the dictionary
return the values of the params, e.g. `self.params_node_dict[name]`.
The second would be easier for fixing tau=1 for degenerate branches.
The easiest might be to do the latter only for the taus and default to 1 if
the key ('tau_'+branchname) is not found. I also need to exclude tau for
degenerate branches from params, but then I cannot change them from the
outside for testing and experimentation. (?)
* SAS manual describes restrictions on tau (though their model is a bit
  different), e.g. equal tau across sibling branches, fixed tau. They also
  allow linear and non-linear (? not sure) restrictions on params, the
regression coefficients. Related to previous issue, callback without access
to the underlying array, where params_node_dict returns the actual params
value would provide more flexibility to impose different kinds of restrictions.
bugs/problems
-------------
* singleton branches return zero to `top`, not a value
I'm not sure what they are supposed to return, given the split between returns
and instance.probs DONE
* Why does 'Air' (singleton branch) get probability exactly 0.5 ? DONE
TODO
----
* add tau, normalization for nested logit, currently tau is 1 (clogit)
taus also needs to become part of params MOSTLY DONE
* add effect of branch level explanatory variables DONE
* write a generic multinomial logit that takes arbitrary probabilities, this
would be the same for MNL, clogit and runmnl,
delegate calculation of probabilities
* test on actual data,
- tau=1 replicate clogit numbers,
- transport example from Greene tests 1-level tree and degenerate sub-trees
- test example for multi-level trees ???
* starting values: Greene mentions that the starting values for the nested
version come from the (non-nested) MNL version. SPSS uses constant equal
(? check transformation) to sample frequencies and zeros for slope
coefficient as starting values for (non-nested) MNL
* associated test statistics
- (I don't think I will fight with the gradient or hessian of the log-like.)
- basic MLE statistics can be generic
- tests specific to the model (?)
* nice printouts since I'm currently collecting a lot of information in the tree
recursion and everything has names
The only parts that are really necessary to get a functional nested logit are
adding the taus (DONE) and the MLE wrapper class. The rest are enhancements.
I added fake tau, one fixed tau for all branches. (OBSOLETE)
It's not clear where the tau for leaf should be added either at
original assignment of self.probs, or as part of the one-step-down
probability correction in the bottom branches. The second would be
cleaner (would make treatment of leaves and branches more symmetric,
but requires that initial assignment in the leaf only does
initialization. e.g self.probs = 1. ???
DONE added taus
still todo:
- tau for degenerate branches are not identified, set to 1 for MLE
- rename parinddict to paramsinddict
Author: Josef Perktold
License : BSD (3-clause)
'''
from __future__ import print_function
from statsmodels.compat.python import lzip, iteritems, itervalues, lrange, zip
import numpy as np
from pprint import pprint
def randintw(w, size=1):
    '''generate integer random variables given probabilities
useful because it can be used as index into any array or sequence type
Parameters
----------
w : 1d array_like
        sequence of weights, probabilities. The weights are normalized to add
to one.
size : int or tuple of ints
shape of output array
Returns
-------
rvs : array of shape given by size
random variables each distributed according to the same discrete
distribution defined by (normalized) w.
Examples
--------
>>> np.random.seed(0)
>>> randintw([0.4, 0.4, 0.2], size=(2,6))
array([[1, 1, 1, 1, 1, 1],
[1, 2, 2, 0, 1, 1]])
>>> np.bincount(randintw([0.6, 0.4, 0.0], size=3000))/3000.
array([ 0.59566667, 0.40433333])
'''
#from Charles Harris, numpy mailing list
from numpy.random import random
p = np.cumsum(w)/np.sum(w)
rvs = p.searchsorted(random(np.prod(size))).reshape(size)
return rvs
def getbranches(tree):
'''
walk tree to get list of branches
Parameters
----------
tree : list of tuples
tree as defined for RU2NMNL
Returns
-------
branch : list
list of all branch names
'''
if isinstance(tree, tuple):
name, subtree = tree
a = [name]
for st in subtree:
a.extend(getbranches(st))
return a
return []
def getnodes(tree):
'''
walk tree to get list of branches and list of leaves
Parameters
----------
tree : list of tuples
tree as defined for RU2NMNL
Returns
-------
branch : list
list of all branch names
leaves : list
list of all leaves names
'''
if isinstance(tree, tuple):
name, subtree = tree
ab = [name]
al = []
#degenerate branches
if len(subtree) == 1:
adeg = [name]
else:
adeg = []
for st in subtree:
b, l, d = getnodes(st)
ab.extend(b)
al.extend(l)
adeg.extend(d)
return ab, al, adeg
return [], [tree], []
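# Example (illustrative, using the `tree0` defined under __main__ below):
#   getbranches(tree0) -> ['top', 'Fly', 'Ground']
#   getnodes(tree0)    -> (['top', 'Fly', 'Ground'],
#                          ['Air', 'Train', 'Car', 'Bus'],
#                          ['Fly'])   # 'Fly' is a degenerate single-leaf branch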
DEBUG = 0  #module-level default so calc_prob's DEBUG checks work on import; __main__ below sets it again
testxb = 2 #global to class to return strings instead of numbers
class RU2NMNL(object):
'''Nested Multinomial Logit with Random Utility 2 parameterization
Parameters
----------
endog : array
not used in this part
exog : dict_like
dictionary access to data where keys correspond to branch and leaf
names. The values are the data arrays for the exog in that node.
tree : nested tuples and lists
each branch, tree or subtree, is defined by a tuple
(branch_name, [subtree1, subtree2, ..., subtreek])
Bottom branches have as subtrees the list of leaf names.
paramsind : dictionary
dictionary that maps branch and leaf names to the names of parameters,
        (the coefficients for the exogs)
Methods
-------
get_probs
Attributes
----------
branches
leaves
paramsnames
parinddict
Notes
-----
endog needs to be encoded so it is consistent with self.leaves, which
defines the columns for the probability array. The ordering in leaves is
determined by the ordering of the tree.
In the dummy encoding of endog, the columns of endog need to have the
same order as self.leaves. In the integer encoding, the integer for a
choice has to correspond to the index in self.leaves.
(This could be made more robust, by handling the endog encoding internally
by leaf names, if endog is defined as categorical variable with
associated category level names.)
'''
def __init__(self, endog, exog, tree, paramsind):
self.endog = endog
self.datadict = exog
self.tree = tree
self.paramsind = paramsind
self.branchsum = ''
self.probs = {}
self.probstxt = {}
self.branchleaves = {}
self.branchvalues = {} #just to keep track of returns by branches
self.branchsums = {}
self.bprobs = {}
self.branches, self.leaves, self.branches_degenerate = getnodes(tree)
self.nbranches = len(self.branches)
#copied over but not quite sure yet
#unique, parameter array names,
#sorted alphabetically, order is/should be only internal
self.paramsnames = (sorted(set([i for j in itervalues(paramsind)
for i in j])) +
['tau_%s' % bname for bname in self.branches])
self.nparams = len(self.paramsnames)
#mapping coefficient names to indices to unique/parameter array
self.paramsidx = dict((name, idx) for (idx,name) in
enumerate(self.paramsnames))
#mapping branch and leaf names to index in parameter array
self.parinddict = dict((k, [self.paramsidx[j] for j in v])
for k,v in iteritems(self.paramsind))
self.recursionparams = 1. + np.arange(len(self.paramsnames))
#for testing that individual parameters are used in the right place
self.recursionparams = np.zeros(len(self.paramsnames))
#self.recursionparams[2] = 1
self.recursionparams[-self.nbranches:] = 1 #values for tau's
#self.recursionparams[-2] = 2
def get_probs(self, params):
'''
obtain the probability array given an array of parameters
This is the function that can be called by loglike or other methods
that need the probabilities as function of the params.
Parameters
----------
params : 1d array, (nparams,)
coefficients and tau that parameterize the model. The required
length can be obtained by nparams. (and will depend on the number
of degenerate leaves - not yet)
Returns
-------
probs : array, (nobs, nchoices)
            probabilities for all choices for each observation. The order
is available by attribute leaves. See note in docstring of class
'''
self.recursionparams = params
self.calc_prob(self.tree)
probs_array = np.array([self.probs[leaf] for leaf in self.leaves])
return probs_array
#what's the ordering? Should be the same as sequence in tree.
#TODO: need a check/assert that this sequence is the same as the
# encoding in endog
def calc_prob(self, tree, parent=None):
'''walking a tree bottom-up based on dictionary
'''
#0.5#2 #placeholder for now
#should be tau=self.taus[name] but as part of params for optimization
endog = self.endog
datadict = self.datadict
paramsind = self.paramsind
branchsum = self.branchsum
if isinstance(tree, tuple): #assumes leaves are int for choice index
name, subtree = tree
self.branchleaves[name] = [] #register branch in dictionary
tau = self.recursionparams[self.paramsidx['tau_'+name]]
if DEBUG:
print('----------- starting next branch-----------')
print(name, datadict[name], 'tau=', tau)
print('subtree', subtree)
branchvalue = []
if testxb == 2:
branchsum = 0
elif testxb == 1:
branchsum = datadict[name]
else:
branchsum = name
for b in subtree:
if DEBUG:
print(b)
bv = self.calc_prob(b, name)
bv = np.exp(bv/tau) #this shouldn't be here, when adding branch data
branchvalue.append(bv)
branchsum = branchsum + bv
self.branchvalues[name] = branchvalue #keep track what was returned
if DEBUG:
print('----------- returning to branch-----------')
print(name)
print('branchsum in branch', name, branchsum)
if parent:
if DEBUG:
print('parent', parent)
self.branchleaves[parent].extend(self.branchleaves[name])
if 0: #not name == 'top': # not used anymore !!! ???
#if not name == 'top':
#TODO: do I need this only on the lowest branches ?
tmpsum = 0
for k in self.branchleaves[name]:
#similar to this is now also in return branch values
#depends on what will be returned
tmpsum += self.probs[k]
iv = np.log(tmpsum)
for k in self.branchleaves[name]:
self.probstxt[k] = self.probstxt[k] + ['*' + name + '-prob' +
'(%s)' % ', '.join(self.paramsind[name])]
#TODO: does this use the denominator twice now
self.probs[k] = self.probs[k] / tmpsum
if np.size(self.datadict[name])>0:
#not used yet, might have to move one indentation level
#self.probs[k] = self.probs[k] / tmpsum
## np.exp(-self.datadict[name] *
## np.sum(self.recursionparams[self.parinddict[name]]))
if DEBUG:
print('self.datadict[name], self.probs[k]')
print(self.datadict[name], self.probs[k])
#if not name == 'top':
# self.probs[k] = self.probs[k] * np.exp( iv)
#walk one level down again to add branch probs to instance.probs
self.bprobs[name] = []
for bidx, b in enumerate(subtree):
if DEBUG:
print('repr(b)', repr(b), bidx)
#if len(b) == 1: #TODO: skip leaves, check this
if not isinstance(b, tuple): # isinstance(b, str):
#TODO: replace this with a check for branch (tuple) instead
#this implies name is a bottom branch,
#possible to add special things here
self.bprobs[name].append(self.probs[b])
#TODO: need tau possibly here
self.probs[b] = self.probs[b] / branchsum
if DEBUG:
print('*********** branchsum at bottom branch', branchsum)
#self.bprobs[name].append(self.probs[b])
else:
bname = b[0]
branchsum2 = sum(self.branchvalues[name])
assert np.abs(branchsum - branchsum2).sum() < 1e-8
bprob = branchvalue[bidx]/branchsum
self.bprobs[name].append(bprob)
for k in self.branchleaves[bname]:
if DEBUG:
print('branchprob', bname, k, bprob, branchsum)
#temporary hack with maximum to avoid zeros
self.probs[k] = self.probs[k] * np.maximum(bprob, 1e-4)
if DEBUG:
print('working on branch', tree, branchsum)
if testxb<2:
return branchsum
else: #this is the relevant part
self.branchsums[name] = branchsum
if np.size(self.datadict[name])>0:
branchxb = np.sum(self.datadict[name] *
self.recursionparams[self.parinddict[name]])
else:
branchxb = 0
if not name=='top':
tau = self.recursionparams[self.paramsidx['tau_'+name]]
else:
tau = 1
iv = branchxb + tau * branchsum #which tau: name or parent???
return branchxb + tau * np.log(branchsum) #iv
#branchsum is now IV, TODO: add effect of branch variables
else:
tau = self.recursionparams[self.paramsidx['tau_'+parent]]
if DEBUG:
print('parent', parent)
self.branchleaves[parent].append(tree) # register leave with parent
self.probstxt[tree] = [tree + '-prob' +
'(%s)' % ', '.join(self.paramsind[tree])]
#this is not yet a prob, not normalized to 1, it is exp(x*b)
leafprob = np.exp(np.sum(self.datadict[tree] *
self.recursionparams[self.parinddict[tree]])
/ tau) # fake tau for now, wrong spot ???
#it seems I get the same answer with and without tau here
self.probs[tree] = leafprob #= 1 #try initialization only
#TODO: where should I add tau in the leaves
if testxb == 2:
return np.log(leafprob)
elif testxb == 1:
leavessum = np.array(datadict[tree]) # sum((datadict[bi] for bi in datadict[tree]))
if DEBUG:
print('final branch with', tree, ''.join(tree), leavessum) #sum(tree)
return leavessum #sum(xb[tree])
elif testxb == 0:
return ''.join(tree) #sum(tree)
if __name__ == '__main__':
DEBUG = 0
endog = 5 # dummy place holder
############## Example similar to Greene
#get pickled data
#endog3, xifloat3 = cPickle.load(open('xifloat2.pickle','rb'))
tree0 = ('top',
[('Fly',['Air']),
('Ground', ['Train', 'Car', 'Bus'])
]
)
''' this is with real data from Greene's clogit example
datadict = dict(zip(['Air', 'Train', 'Bus', 'Car'],
[xifloat[i]for i in range(4)]))
'''
#for testing only (mock that returns it's own name
datadict = dict(zip(['Air', 'Train', 'Bus', 'Car'],
['Airdata', 'Traindata', 'Busdata', 'Cardata']))
if testxb:
datadict = dict(zip(['Air', 'Train', 'Bus', 'Car'],
np.arange(4)))
datadict.update({'top' : [],
'Fly' : [],
'Ground': []})
paramsind = {'top' : [],
'Fly' : [],
'Ground': [],
'Air' : ['GC', 'Ttme', 'ConstA', 'Hinc'],
'Train' : ['GC', 'Ttme', 'ConstT'],
'Bus' : ['GC', 'Ttme', 'ConstB'],
'Car' : ['GC', 'Ttme']
}
modru = RU2NMNL(endog, datadict, tree0, paramsind)
modru.recursionparams[-1] = 2
modru.recursionparams[1] = 1
print('Example 1')
print('---------\n')
print(modru.calc_prob(modru.tree))
print('Tree')
pprint(modru.tree)
print('\nmodru.probs')
pprint(modru.probs)
############## example with many layers
tree2 = ('top',
[('B1',['a','b']),
('B2',
[('B21',['c', 'd']),
('B22',['e', 'f', 'g'])
]
),
('B3',['h'])
]
)
    #Note: dict loses ordering
paramsind2 = {
'B1': [],
'a': ['consta', 'p'],
'b': ['constb', 'p'],
'B2': ['const2', 'x2'],
'B21': [],
'c': ['constc', 'p', 'time'],
'd': ['constd', 'p', 'time'],
'B22': ['x22'],
'e': ['conste', 'p', 'hince'],
'f': ['constf', 'p', 'hincf'],
'g': [ 'p', 'hincg'],
'B3': [],
'h': ['consth', 'p', 'h'],
'top': []}
datadict2 = dict([i for i in zip('abcdefgh',lrange(8))])
datadict2.update({'top':1000, 'B1':100, 'B2':200, 'B21':21,'B22':22, 'B3':300})
'''
>>> pprint(datadict2)
{'B1': 100,
'B2': 200,
'B21': 21,
'B22': 22,
'B3': 300,
'a': 0.5,
'b': 1,
'c': 2,
'd': 3,
'e': 4,
'f': 5,
'g': 6,
'h': 7,
'top': 1000}
'''
modru2 = RU2NMNL(endog, datadict2, tree2, paramsind2)
modru2.recursionparams[-3] = 2
modru2.recursionparams[3] = 1
print('\n\nExample 2')
print('---------\n')
print(modru2.calc_prob(modru2.tree))
print('Tree')
pprint(modru2.tree)
print('\nmodru.probs')
pprint(modru2.probs)
print('sum of probs', sum(list(itervalues(modru2.probs))))
print('branchvalues')
print(modru2.branchvalues)
print(modru.branchvalues)
print('branch probabilities')
print(modru.bprobs)
print('degenerate branches')
print(modru.branches_degenerate)
'''
>>> modru.bprobs
{'Fly': [], 'top': [0.0016714179077931082, 0.99832858209220687], 'Ground': []}
>>> modru2.bprobs
{'top': [0.25000000000000006, 0.62499999999999989, 0.12500000000000003], 'B22': [], 'B21': [], 'B1': [], 'B2': [0.40000000000000008, 0.59999999999999998], 'B3': []}
'''
params1 = np.array([ 0., 1., 0., 0., 0., 0., 1., 1., 2.])
print(modru.get_probs(params1))
params2 = np.array([ 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 1., 1., 1., 2., 1., 1.])
print(modru2.get_probs(params2)) #raises IndexError
|
patrickrolanddg/jaikuengine
|
refs/heads/master
|
middleware/strip_whitespace.py
|
34
|
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class WhitespaceMiddleware(object):
"""Class to strip leading and trailing whitespace from all form fields.
Note that files are not in POST but in FILES, so this will not touch binary
data.
If it turns out that this breaks something we can add an url white/blacklist.
"""
def _strip_from_values(self, qdict):
copy = None
for k, v in qdict.items():
stripped = v.strip()
      if v != stripped:
if not copy:
copy = qdict.copy()
copy[k] = stripped
if copy:
return copy
return qdict
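  # Illustrative behaviour (sketch): for a QueryDict holding {'q': '  hi '}
  # this returns a copy with {'q': 'hi'}; when nothing needs stripping, the
  # original dict is returned unchanged and no copy is made.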
def process_request(self, request):
request.GET = self._strip_from_values(request.GET)
request.POST = self._strip_from_values(request.POST)
|
legalsylvain/OpenUpgrade
|
refs/heads/master
|
addons/l10n_si/account_wizard.py
|
124
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class AccountWizard_cd(osv.osv_memory):
_inherit = 'wizard.multi.charts.accounts'
_defaults = {
'code_digits' : 6,
}
|
hackerhelmut/trap-gen
|
refs/heads/master
|
processors/LEON3/LEONDefs.py
|
2
|
# -*- coding: iso-8859-1 -*-
# Just a bunch of definitions which may turn out useful
# Number of register windows, between 2 and 32, default is 8 for LEON3
numRegWindows = 8
# pipelined multiplication unit
pipelinedMult = False
# multiplier size (valid values 16/32)
multiplier_size = 16
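# Hedged sanity check (an editorial addition; whether trap-gen validates these
# settings elsewhere is not shown in this file): enforce the documented ranges
# from the comments above.
assert 2 <= numRegWindows <= 32, 'numRegWindows must be between 2 and 32'
assert multiplier_size in (16, 32), 'multiplier_size must be 16 or 32'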
|
hujiajie/chromium-crosswalk
|
refs/heads/master
|
tools/telemetry/catapult_base/refactor/annotated_symbol/__init__.py
|
17
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=wildcard-import
from catapult_base.refactor.annotated_symbol.class_definition import *
from catapult_base.refactor.annotated_symbol.function_definition import *
from catapult_base.refactor.annotated_symbol.import_statement import *
from catapult_base.refactor.annotated_symbol.reference import *
from catapult_base.refactor import snippet
__all__ = [
'Annotate',
'Class',
'Function',
'Import',
'Reference',
]
# Specific symbol types with extra methods for manipulating them.
# Python's full grammar is here:
# https://docs.python.org/2/reference/grammar.html
# Annotated Symbols have an Annotate classmethod that takes a symbol type and
# list of children, and returns an instance of that annotated Symbol.
ANNOTATED_SYMBOLS = (
AsName,
Class,
DottedName,
ImportFrom,
ImportName,
Function,
)
# Unfortunately, some logical groupings are not represented by a node in the
# parse tree. To work around this, some annotated Symbols have an Annotate
# classmethod that takes and returns a list of Snippets instead.
ANNOTATED_GROUPINGS = (
Reference,
)
def Annotate(f):
"""Return the syntax tree of the given file."""
return _AnnotateNode(snippet.Snippetize(f))
def _AnnotateNode(node):
if not isinstance(node, snippet.Symbol):
return node
children = map(_AnnotateNode, node.children)
for symbol_type in ANNOTATED_GROUPINGS:
annotated_grouping = symbol_type.Annotate(children)
if annotated_grouping:
children = annotated_grouping
break
for symbol_type in ANNOTATED_SYMBOLS:
annotated_symbol = symbol_type.Annotate(node.type, children)
if annotated_symbol:
return annotated_symbol
return snippet.Symbol(node.type, children)
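# Hedged usage sketch (an editorial addition; only the Annotate() entry point
# defined above is assumed):
#
#   with open('example.py') as f:
#       tree = Annotate(f)    # Snippet tree with Class/Function/Import nodes
#   # annotated nodes can then be located and manipulated before the tree is
#   # rendered back to source text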
|
mdaniel/intellij-community
|
refs/heads/master
|
python/testData/intentions/convertTripleQuotedStringRawStrings.py
|
79
|
S = (<caret>r'''foo
double-only"
single-only'
mix'ed"
'''
"""\r\n"""
r'one line')
|
jchevin/MissionPlanner-master
|
refs/heads/master
|
packages/IronPython.StdLib.2.7.4/content/Lib/encodings/cp866.py
|
593
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp866',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
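# Hedged round-trip sketch (an editorial addition; the values come straight
# from the tables below, where byte 0x80 maps to U+0410):
#
#   >>> Codec().decode('\x80', 'strict')[0]
#   u'\u0410'
#   >>> Codec().encode(u'\u0410', 'strict')[0]
#   '\x80'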
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x2116, # NUMERO SIGN
0x00fd: 0x00a4, # CURRENCY SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O
u'\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E
u'\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA
u'\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO
u'\u0404' # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0x00f4 -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0x00f5 -> CYRILLIC SMALL LETTER YI
u'\u040e' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u2116' # 0x00fc -> NUMERO SIGN
u'\xa4' # 0x00fd -> CURRENCY SIGN
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00fd, # CURRENCY SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x0401: 0x00f0, # CYRILLIC CAPITAL LETTER IO
0x0404: 0x00f2, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0407: 0x00f4, # CYRILLIC CAPITAL LETTER YI
0x040e: 0x00f6, # CYRILLIC CAPITAL LETTER SHORT U
0x0410: 0x0080, # CYRILLIC CAPITAL LETTER A
0x0411: 0x0081, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x0082, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x0083, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x0084, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x0085, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x0086, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x0087, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x0088, # CYRILLIC CAPITAL LETTER I
0x0419: 0x0089, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x008a, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x008b, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x008c, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x008d, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x008e, # CYRILLIC CAPITAL LETTER O
0x041f: 0x008f, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x0090, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x0091, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x0092, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x0093, # CYRILLIC CAPITAL LETTER U
0x0424: 0x0094, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x0095, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x0096, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x0097, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x0098, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x0099, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x009b, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x009c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x009d, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009e, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x009f, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a1, # CYRILLIC SMALL LETTER BE
0x0432: 0x00a2, # CYRILLIC SMALL LETTER VE
0x0433: 0x00a3, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a4, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a5, # CYRILLIC SMALL LETTER IE
0x0436: 0x00a6, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00a7, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00a8, # CYRILLIC SMALL LETTER I
0x0439: 0x00a9, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00aa, # CYRILLIC SMALL LETTER KA
0x043b: 0x00ab, # CYRILLIC SMALL LETTER EL
0x043c: 0x00ac, # CYRILLIC SMALL LETTER EM
0x043d: 0x00ad, # CYRILLIC SMALL LETTER EN
0x043e: 0x00ae, # CYRILLIC SMALL LETTER O
0x043f: 0x00af, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e0, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e1, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e2, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e3, # CYRILLIC SMALL LETTER U
0x0444: 0x00e4, # CYRILLIC SMALL LETTER EF
0x0445: 0x00e5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00e6, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00e7, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00e8, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00e9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x00ea, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00eb, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ec, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00ed, # CYRILLIC SMALL LETTER E
0x044e: 0x00ee, # CYRILLIC SMALL LETTER YU
0x044f: 0x00ef, # CYRILLIC SMALL LETTER YA
0x0451: 0x00f1, # CYRILLIC SMALL LETTER IO
0x0454: 0x00f3, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0457: 0x00f5, # CYRILLIC SMALL LETTER YI
0x045e: 0x00f7, # CYRILLIC SMALL LETTER SHORT U
0x2116: 0x00fc, # NUMERO SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
|
CottageLabs/OpenArticleGauge
|
refs/heads/dev
|
openarticlegauge/tests/test_buffer_integration.py
|
1
|
########################################################################################
# Before running these tests, you must start all of the infrastructure with the
# storage buffering turned ON
#
# BUFFERING = True
# BUFFER_FLUSH_PERIOD=6
# BUFFER_GRACE_PERIOD=5
#
# FIXME: we need a better way of starting and re-configuring the running application
# within Celery, but that is less important than getting the app actually working, so
# this clunky integration test will have to suffice for the moment
########################################################################################
from unittest import TestCase
from openarticlegauge import workflow, config, models, cache
import redis, json, datetime, requests, uuid, time
test_host = config.DEFAULT_HOST
test_port = 6379
test_db = 2 # use the real cache database, since this is an integration test
lookup_url = "http://{host}:5000/lookup/".format(host=config.DEFAULT_HOST)
class TestIntegration(TestCase):
def setUp(self):
self.buffer = config.BUFFERING
self.flush = config.BUFFER_FLUSH_PERIOD
self.grace = config.BUFFER_GRACE_PERIOD
config.BUFFERING = True
config.BUFFER_FLUSH_PERIOD = 6
config.BUFFER_GRACE_PERIOD = 5
config.redis_cache_host = test_host
config.redis_cache_port = test_port
config.redis_cache_db = test_db
def tearDown(self):
client = redis.StrictRedis(host=test_host, port=test_port, db=test_db)
client.delete("doi:10.success/1")
client.delete("doi:10.queued/1")
client.delete("doi:10.cached/1")
client.delete("doi:10.stale/1")
client.delete("doi:10.archived/1")
client.delete("doi:10.1371/journal.pone.0035089")
models.Record(id="doi:10.stale_1").delete()
models.Record(id="doi:10.archived_1").delete()
models.Record(id="doi:10.1371_journal.pone.0035089").delete()
config.BUFFERING = self.buffer
config.BUFFER_FLUSH_PERIOD = self.flush
config.BUFFER_GRACE_PERIOD = self.grace
def test_01_lookup_cache_buffered_archive(self):
# The various vectors we want to test
# - a successful cached and in-date record
# - a cached record which is queued
# - a cached record which is stale, but which has an updated version in the archive
# - a record which is in the archive but not the cache, and which is in-date
now = datetime.datetime.now()
year = datetime.timedelta(days=365)
cached = {
"identifier" : {"id" : "10.cached/1", "type" : "doi", "canonical" : "doi:10.cached/1"},
"bibjson" : {"title" : "mytitle",
"license" : [{
"provenance" : {
"date" : datetime.datetime.strftime(now, "%Y-%m-%dT%H:%M:%SZ")
}
}]
}
}
queued = {
"identifier" : {"id" : "10.queued/1", "type" : "doi", "canonical" : "doi:10.queued/1"},
"queued" : True
}
stale = {
"identifier" : {"id" : "10.stale/1", "type" : "doi", "canonical" : "doi:10.stale/1"},
"bibjson" : {"title" : "another title",
"license" : [{
"provenance" : {
"date" : datetime.datetime.strftime(now - year, "%Y-%m-%dT%H:%M:%SZ")
}
}]
}
}
updated_stale = {
"identifier" : [{"id" : "10.stale/1", "type" : "doi", "canonical" : "doi:10.stale/1"}],
"title" : "updated",
"license" : [{
"provenance" : {
"date" : datetime.datetime.strftime(now, "%Y-%m-%dT%H:%M:%SZ")
}
}]
}
archived = {
"identifier" : [{"id" : "10.archived/1", "type" : "doi", "canonical" : "doi:10.archived/1"}],
"title" : "archived",
"license" : [{
"provenance" : {
"date" : datetime.datetime.strftime(now, "%Y-%m-%dT%H:%M:%SZ")
}
}]
}
# set up the test cache
client = redis.StrictRedis(host=test_host, port=test_port, db=test_db)
client.set("doi:10.cached/1", json.dumps(cached))
client.set("doi:10.queued/1", json.dumps(queued))
client.set("doi:10.stale/1", json.dumps(stale))
# set up the test archive (which will put these things into the buffer)
models.Record.store(updated_stale)
models.Record.store(archived)
resp = requests.post(lookup_url + "10.cached/1,10.queued/1,10.stale/1,10.archived/1")
obj = json.loads(resp.text)
# even though the buffer has not yet been written to the archive, the buffer itself
# should behave as though it is the archive, from the point of view of the rest of
# the app, so we expect the same results as the unbuffered version
assert obj["requested"] == 4, json.dumps(obj, indent=2)
assert len(obj["results"]) == 3, json.dumps(obj, indent=2) # expect: cached, stale (updated version), archived
assert len(obj["processing"]) == 1, json.dumps(obj, indent=2) # expect: queued
def test_02_lookup_cache_actual_archive(self):
# The various vectors we want to test
# - a successful cached and in-date record
# - a cached record which is queued
# - a cached record which is stale, but which has an updated version in the archive
# - a record which is in the archive but not the cache, and which is in-date
now = datetime.datetime.now()
year = datetime.timedelta(days=365)
cached = {
"identifier" : {"id" : "10.cached/1", "type" : "doi", "canonical" : "doi:10.cached/1"},
"bibjson" : {"title" : "mytitle",
"license" : [{
"provenance" : {
"date" : datetime.datetime.strftime(now, "%Y-%m-%dT%H:%M:%SZ")
}
}]
}
}
queued = {
"identifier" : {"id" : "10.queued/1", "type" : "doi", "canonical" : "doi:10.queued/1"},
"queued" : True
}
stale = {
"identifier" : {"id" : "10.stale/1", "type" : "doi", "canonical" : "doi:10.stale/1"},
"bibjson" : {"title" : "another title",
"license" : [{
"provenance" : {
"date" : datetime.datetime.strftime(now - year, "%Y-%m-%dT%H:%M:%SZ")
}
}]
}
}
updated_stale = {
"identifier" : [{"id" : "10.stale/1", "type" : "doi", "canonical" : "doi:10.stale/1"}],
"title" : "updated",
"license" : [{
"provenance" : {
"date" : datetime.datetime.strftime(now, "%Y-%m-%dT%H:%M:%SZ")
}
}]
}
archived = {
"identifier" : [{"id" : "10.archived/1", "type" : "doi", "canonical" : "doi:10.archived/1"}],
"title" : "archived",
"license" : [{
"provenance" : {
"date" : datetime.datetime.strftime(now, "%Y-%m-%dT%H:%M:%SZ")
}
}]
}
# set up the test cache
client = redis.StrictRedis(host=test_host, port=test_port, db=test_db)
client.set("doi:10.cached/1", json.dumps(cached))
client.set("doi:10.queued/1", json.dumps(queued))
client.set("doi:10.stale/1", json.dumps(stale))
# set up the test archive (which will put these things into the buffer)
models.Record.store(updated_stale)
models.Record.store(archived)
resp = requests.post(lookup_url + "10.cached/1,10.queued/1,10.stale/1,10.archived/1")
obj = json.loads(resp.text)
# even though the buffer has not yet been written to the archive, the buffer itself
# should behave as though it is the archive, from the point of view of the rest of
# the app, so we expect the same results as the unbuffered version
assert obj["requested"] == 4, json.dumps(obj, indent=2)
assert len(obj["results"]) == 3, json.dumps(obj, indent=2) # expect: cached, stale (updated version), archived
assert len(obj["processing"]) == 1, json.dumps(obj, indent=2) # expect: queued
# now we need to wait for the buffer to flush before making our next corroborating request
time.sleep(31)
# now do the same request again, and expect the same results
resp = requests.post(lookup_url + "10.cached/1,10.queued/1,10.stale/1,10.archived/1")
obj = json.loads(resp.text)
# by now the buffer should have been flushed, so these records live in the
# real archive, and we expect exactly the same results as the buffered pass
assert obj["requested"] == 4, json.dumps(obj, indent=2)
assert len(obj["results"]) == 3, json.dumps(obj, indent=2) # expect: cached, stale (updated version), archived
assert len(obj["processing"]) == 1, json.dumps(obj, indent=2) # expect: queued
|
tillahoffmann/tensorflow
|
refs/heads/master
|
tensorflow/contrib/keras/api/keras/datasets/boston_housing/__init__.py
|
73
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Boston housing price regression dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.datasets.boston_housing import load_data
del absolute_import
del division
del print_function
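# Hedged usage sketch (an editorial addition; the standard Keras
# boston_housing signature is assumed):
#
#   (x_train, y_train), (x_test, y_test) = load_data()
#   # x_* hold 13 numeric housing attributes per sample, y_* the house prices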
|
ted-gould/nova
|
refs/heads/master
|
nova/tests/unit/virt/ironic/test_client_wrapper.py
|
26
|
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient import client as ironic_client
from ironicclient import exc as ironic_exception
import mock
from oslo_config import cfg
from nova import exception
from nova import test
from nova.tests.unit.virt.ironic import utils as ironic_utils
from nova.virt.ironic import client_wrapper
CONF = cfg.CONF
FAKE_CLIENT = ironic_utils.FakeClient()
def get_new_fake_client(*args, **kwargs):
return ironic_utils.FakeClient()
class IronicClientWrapperTestCase(test.NoDBTestCase):
def setUp(self):
super(IronicClientWrapperTestCase, self).setUp()
self.ironicclient = client_wrapper.IronicClientWrapper()
# Do not waste time sleeping
cfg.CONF.set_override('api_retry_interval', 0, 'ironic')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_good_no_args(self, mock_get_client, mock_multi_getattr):
mock_get_client.return_value = FAKE_CLIENT
self.ironicclient.call("node.list")
mock_get_client.assert_called_once_with()
mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
mock_multi_getattr.return_value.assert_called_once_with()
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_good_with_args(self, mock_get_client, mock_multi_getattr):
mock_get_client.return_value = FAKE_CLIENT
self.ironicclient.call("node.list", 'test', associated=True)
mock_get_client.assert_called_once_with()
mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
mock_multi_getattr.return_value.assert_called_once_with(
'test', associated=True)
@mock.patch.object(ironic_client, 'get_client')
def test__get_client_no_auth_token(self, mock_ir_cli):
self.flags(admin_auth_token=None, group='ironic')
ironicclient = client_wrapper.IronicClientWrapper()
# dummy call to have _get_client() called
ironicclient.call("node.list")
expected = {'os_username': CONF.ironic.admin_username,
'os_password': CONF.ironic.admin_password,
'os_auth_url': CONF.ironic.admin_url,
'os_tenant_name': CONF.ironic.admin_tenant_name,
'os_service_type': 'baremetal',
'os_endpoint_type': 'public',
'ironic_url': CONF.ironic.api_endpoint,
'max_retries': CONF.ironic.api_max_retries,
'retry_interval': CONF.ironic.api_retry_interval}
mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
**expected)
@mock.patch.object(ironic_client, 'get_client')
def test__get_client_with_auth_token(self, mock_ir_cli):
self.flags(admin_auth_token='fake-token', group='ironic')
ironicclient = client_wrapper.IronicClientWrapper()
# dummy call to have _get_client() called
ironicclient.call("node.list")
expected = {'os_auth_token': 'fake-token',
'ironic_url': CONF.ironic.api_endpoint,
'max_retries': CONF.ironic.api_max_retries,
'retry_interval': CONF.ironic.api_retry_interval}
mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
**expected)
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_fail(self, mock_get_client, mock_multi_getattr):
cfg.CONF.set_override('api_max_retries', 2, 'ironic')
test_obj = mock.Mock()
test_obj.side_effect = ironic_exception.HTTPServiceUnavailable
mock_multi_getattr.return_value = test_obj
mock_get_client.return_value = FAKE_CLIENT
self.assertRaises(exception.NovaException, self.ironicclient.call,
"node.list")
self.assertEqual(3, test_obj.call_count)
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_with_api_max_retries_neg_conf_val(self, mock_get_client,
mock_multi_getattr):
cfg.CONF.set_default('api_max_retries', -1, 'ironic')
test_obj = mock.Mock()
test_obj.side_effect = ironic_exception.HTTPServiceUnavailable
mock_multi_getattr.return_value = test_obj
mock_get_client.return_value = FAKE_CLIENT
self.assertRaises(exception.NovaException, self.ironicclient.call,
"node.list")
self.assertEqual(1, test_obj.call_count)
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_fail_unexpected_exception(self, mock_get_client,
mock_multi_getattr):
test_obj = mock.Mock()
test_obj.side_effect = ironic_exception.HTTPNotFound
mock_multi_getattr.return_value = test_obj
mock_get_client.return_value = FAKE_CLIENT
self.assertRaises(ironic_exception.HTTPNotFound,
self.ironicclient.call, "node.list")
@mock.patch.object(ironic_client, 'get_client')
def test__get_client_unauthorized(self, mock_get_client):
mock_get_client.side_effect = ironic_exception.Unauthorized
self.assertRaises(exception.NovaException,
self.ironicclient._get_client)
@mock.patch.object(ironic_client, 'get_client')
def test__get_client_unexpected_exception(self, mock_get_client):
mock_get_client.side_effect = ironic_exception.ConnectionRefused
self.assertRaises(ironic_exception.ConnectionRefused,
self.ironicclient._get_client)
def test__multi_getattr_good(self):
response = self.ironicclient._multi_getattr(FAKE_CLIENT, "node.list")
self.assertEqual(FAKE_CLIENT.node.list, response)
def test__multi_getattr_fail(self):
self.assertRaises(AttributeError, self.ironicclient._multi_getattr,
FAKE_CLIENT, "nonexistent")
@mock.patch.object(ironic_client, 'get_client')
def test__client_is_cached(self, mock_get_client):
mock_get_client.side_effect = get_new_fake_client
ironicclient = client_wrapper.IronicClientWrapper()
first_client = ironicclient._get_client()
second_client = ironicclient._get_client()
self.assertEqual(id(first_client), id(second_client))
@mock.patch.object(ironic_client, 'get_client')
def test__invalidate_cached_client(self, mock_get_client):
mock_get_client.side_effect = get_new_fake_client
ironicclient = client_wrapper.IronicClientWrapper()
first_client = ironicclient._get_client()
ironicclient._invalidate_cached_client()
second_client = ironicclient._get_client()
self.assertNotEqual(id(first_client), id(second_client))
@mock.patch.object(ironic_client, 'get_client')
def test_call_uses_cached_client(self, mock_get_client):
mock_get_client.side_effect = get_new_fake_client
ironicclient = client_wrapper.IronicClientWrapper()
for _ in range(4):
ironicclient.call("node.list")
self.assertEqual(1, mock_get_client.call_count)
|
TomHeatwole/osf.io
|
refs/heads/develop
|
scripts/tests/test_retract_registrations.py
|
39
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import RegistrationFactory
from tests.factories import UserFactory
from scripts.retract_registrations import main
class TestRetractRegistrations(OsfTestCase):
def setUp(self):
super(TestRetractRegistrations, self).setUp()
self.user = UserFactory()
self.registration = RegistrationFactory(creator=self.user)
self.registration.is_public = True
self.registration.retract_registration(self.user)
self.registration.save()
def test_new_retraction_should_not_be_retracted(self):
assert_false(self.registration.is_retracted)
main(dry_run=False)
assert_false(self.registration.is_retracted)
def test_should_not_retract_pending_retraction_less_than_48_hours_old(self):
# Retraction#initiation_date is read-only
self.registration.retraction._fields['initiation_date'].__set__(
self.registration.retraction,
(datetime.utcnow() - timedelta(hours=47)),
safe=True
)
# setattr(self.registration.retraction, 'initiation_date', (datetime.utcnow() - timedelta(hours=47)))
self.registration.retraction.save()
assert_false(self.registration.is_retracted)
main(dry_run=False)
assert_false(self.registration.is_retracted)
def test_should_retract_pending_retraction_that_is_48_hours_old(self):
# Retraction#initiation_date is read-only
self.registration.retraction._fields['initiation_date'].__set__(
self.registration.retraction,
(datetime.utcnow() - timedelta(hours=48)),
safe=True
)
self.registration.retraction.save()
assert_false(self.registration.is_retracted)
main(dry_run=False)
assert_true(self.registration.is_retracted)
def test_should_retract_pending_retraction_more_than_48_hours_old(self):
# Retraction#initiation_date is read-only
self.registration.retraction._fields['initiation_date'].__set__(
self.registration.retraction,
(datetime.utcnow() - timedelta(days=365)),
safe=True
)
self.registration.retraction.save()
assert_false(self.registration.is_retracted)
main(dry_run=False)
assert_true(self.registration.is_retracted)
def test_retraction_adds_to_parent_projects_log(self):
initial_project_logs = len(self.registration.registered_from.logs)
# Retraction#initiation_date is read-only
self.registration.retraction._fields['initiation_date'].__set__(
self.registration.retraction,
(datetime.utcnow() - timedelta(days=365)),
safe=True
)
self.registration.retraction.save()
assert_false(self.registration.is_retracted)
main(dry_run=False)
assert_true(self.registration.is_retracted)
# Logs: created, made public, retraction initiated, retraction approved
assert_equal(len(self.registration.registered_from.logs), initial_project_logs + 1)
|
supermanheng21/twilio-python
|
refs/heads/master
|
twilio/rest/resources/conferences.py
|
51
|
from .util import parse_date, normalize_dates
from . import InstanceResource, ListResource
class Participant(InstanceResource):
id_key = "call_sid"
def mute(self):
"""
Mute the participant
"""
self.update_instance(muted="true")
def unmute(self):
"""
Unmute the participant
"""
self.update_instance(muted="false")
def kick(self):
"""
Remove the participant from the given conference
"""
self.delete_instance()
class Participants(ListResource):
name = "Participants"
instance = Participant
def list(self, **kwargs):
"""
Returns a list of :class:`Participant` resources in the given
conference
:param conference_sid: Conference this participant is part of
:param boolean muted: If True, only show participants who are muted
"""
return self.get_instances(kwargs)
def mute(self, call_sid):
"""
Mute the given participant
"""
return self.update(call_sid, muted=True)
def unmute(self, call_sid):
"""
Unmute the given participant
"""
return self.update(call_sid, muted=False)
def kick(self, call_sid):
"""
Remove the participant from the given conference
"""
return self.delete(call_sid)
def delete(self, call_sid):
"""
Remove the participant from the given conference
"""
return self.delete_instance(call_sid)
def update(self, sid, **kwargs):
"""
:param sid: Participant identifier
:param boolean muted: If true, mute this participant
"""
return self.update_instance(sid, kwargs)
class Conference(InstanceResource):
subresources = [
Participants
]
class Conferences(ListResource):
name = "Conferences"
instance = Conference
@normalize_dates
def list(self, updated_before=None, updated_after=None, created_after=None,
created_before=None, updated=None, created=None, **kwargs):
"""
Return a list of :class:`Conference` resources
:param status: Show conferences with this status
:param friendly_name: Show conferences with this exact friendly_name
:param date updated_after: List conferences updated after this date
:param date updated_before: List conferences updated before this date
:param date created_after: List conferences created after this date
:param date created_before: List conferences created before this date
"""
kwargs["DateUpdated"] = parse_date(kwargs.get("date_updated", updated))
kwargs["DateCreated"] = parse_date(kwargs.get("date_created", created))
kwargs["DateUpdated<"] = updated_before
kwargs["DateUpdated>"] = updated_after
kwargs["DateCreated<"] = created_before
kwargs["DateCreated>"] = created_after
return self.get_instances(kwargs)
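# Hedged usage sketch (an editorial addition; `client` is assumed to be a
# configured TwilioRestClient from the wider twilio-python library):
#
#   for conference in client.conferences.list(status='in-progress'):
#       for participant in conference.participants.list(muted=False):
#           participant.mute()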
|
laudaa/bitcoin
|
refs/heads/master
|
test/functional/bip9-softforks.py
|
11
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP 9 soft forks.
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 blocks and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not yet triggered (the next block triggers ACTIVE)
test that enforcement has triggered
"""
from io import BytesIO
import shutil
import time
import itertools
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, network_thread_start
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
class BIP9SoftForksTest(ComparisonTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1']]
self.setup_clean_chain = True
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
network_thread_start()
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks=None):
    # avoid the shared mutable-default pitfall; callers that extend an
    # existing unsent batch pass it in explicitly
    if test_blocks is None:
        test_blocks = []
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
return info['bip9_softforks'][key]
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert_equal(tmpl['version'], 0x20000000)
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 1-A
        # check stats after the max number of "signalling not" blocks such that LOCKED_IN is still possible this period
        test_blocks = self.generate_blocks(36, 4) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(10, activated_version, test_blocks) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
# Test 1-B
# check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period
        test_blocks = self.generate_blocks(1, 4) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False)
# Test 1-C
# finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN
test_blocks = self.generate_blocks(97, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 2
        # Fail to achieve LOCKED_IN: only 100 out of 144 blocks signal bit 1,
        # interleaving signalling and non-signalling blocks within the period
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
        # interleaving signalling and non-signalling blocks within the period
test_blocks = self.generate_blocks(57, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
        # check counting stats and "possible" flag before the last block of this period achieves LOCKED_IN...
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 143)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# ...continue with Test 3
test_blocks = self.generate_blocks(1, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 4
        # 143 more non-signalling (version 4) blocks: one fewer than a full period
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 5
        # Check that the new rule is not yet enforced in the last LOCKED_IN block
        # (accepting this block completes the period and moves the state to ACTIVE)
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
assert_equal(self.get_bip9_status(bipName)['since'], 720)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert(not (tmpl['version'] & (1 << bitno)))
# Test 6
        # Check that the new rules are enforced in the first block after activation
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
        # Restart all nodes and P2P connections before the next test variant
self.test.clear_all_connections()
self.stop_nodes()
self.nodes = []
shutil.rmtree(self.options.tmpdir + "/node0")
self.setup_chain()
self.setup_network()
self.test.add_all_connections(self.nodes)
network_thread_start()
self.test.p2p_connections[0].wait_for_verack()
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
"""Modify the signature in vin 0 of the tx to fail CSV
Prepends -1 CSV DROP in the scriptSig itself.
"""
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
"""Modify the nSequence to make it fails once sequence lock rule is
activated (high timespan).
"""
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
"""Modify the nLockTime to make it fails once MTP rule is activated."""
# Disable Sequence lock, Activate nLockTime
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main()
|
aurelijusb/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/Demos/guidemo.py
|
17
|
# GUI Demo - just a worker script to invoke all the other demo/test scripts.
import win32ui
import __main__
import sys
import regutil
import win32api
demos = [
# ('Font', 'import fontdemo;fontdemo.FontDemo()'),
('Open GL Demo', 'import openGLDemo;openGLDemo.test()'),
('Threaded GUI', 'import threadedgui;threadedgui.ThreadedDemo()'),
('Tree View Demo', 'import hiertest;hiertest.demoboth()'),
('3-Way Splitter Window', 'import splittst;splittst.demo()'),
('Custom Toolbars and Tooltips', 'import toolbar;toolbar.test()'),
('Progress Bar', 'import progressbar;progressbar.demo()'),
('Slider Control', 'import sliderdemo;sliderdemo.demo()'),
('Dynamic window creation', 'import createwin;createwin.demo()'),
('Various Dialog demos', 'import dlgtest;dlgtest.demo()'),
('OCX Control Demo', 'from ocx import ocxtest;ocxtest.demo()'),
('OCX Serial Port Demo', 'from ocx import ocxserialtest; ocxserialtest.test()'),
('IE4 Control Demo', 'from ocx import webbrowser; webbrowser.Demo()'),
]
def demo():
try:
        # see if I can locate the demo files.
import fontdemo
except ImportError:
        # else put the demos directory on the path (if not already)
try:
instPath = regutil.GetRegistryDefaultValue(regutil.BuildDefaultPythonKey() + "\\InstallPath")
except win32api.error:
print "The InstallPath can not be located, and the Demos directory is not on the path"
instPath="."
demosDir = win32ui.FullPath(instPath + "\\Demos")
for path in sys.path:
if win32ui.FullPath(path)==demosDir:
break
else:
sys.path.append(demosDir)
import fontdemo
import sys
if "/go" in sys.argv:
for name, cmd in demos:
try:
exec cmd
except:
print "Demo of %s failed - %s:%s" % (cmd,sys.exc_info()[0], sys.exc_info()[1])
return
# Otherwise allow the user to select the demo to run
import pywin.dialogs.list
while 1:
rc = pywin.dialogs.list.SelectFromLists( "Select a Demo", demos, ['Demo Title'] )
if rc is None:
break
title, cmd = demos[rc]
try:
exec cmd
except:
print "Demo of %s failed - %s:%s" % (title,sys.exc_info()[0], sys.exc_info()[1])
if __name__==__main__.__name__:
import demoutils
if demoutils.NeedGoodGUI():
demo()
|
volab/piclockradio
|
refs/heads/master
|
subprocess_test.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# see: https://docs.python.org/3.3/library/subprocess.html#module-subprocess
import os
import sys
import subprocess
TIMEOUT=3 # seconds
# If you launch a sub-process (even with shell=False), then the
# subprocess.Popen.kill() function will only kill that sub-process (so if there
# are any "grandchild" processes, they won't be terminated).
# See: http://stackoverflow.com/questions/3908063/python-subprocess-with-shell-true-redirections-and-platform-independent-subproc
#
# The solution is to use preexec_fn to make the subprocess acquire its own
# session group (a signal is then sent to all processes in that group).
# See: http://stackoverflow.com/questions/3876886/timeout-a-subprocess
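# A minimal sketch of the session-group approach described above (an addition,
# not part of the original script; POSIX only). Starting the child in its own
# session (start_new_session=True is equivalent to preexec_fn=os.setsid) makes
# it a process-group leader, so os.killpg() also reaches any grandchildren.
def execute_with_group_timeout(args, timeout=TIMEOUT):
    import signal
    proc = subprocess.Popen(args, start_new_session=True)
    try:
        proc.wait(timeout=timeout)
    except subprocess.TimeoutExpired:
        os.killpg(os.getpgid(proc.pid), signal.SIGTERM)  # signal the whole group
        proc.wait()  # reap the child after its group is signalled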
def execute(args):
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT, universal_newlines=True, timeout=TIMEOUT)
#output = subprocess.check_output(args, stderr=subprocess.STDOUT, universal_newlines=True, timeout=TIMEOUT, preexec_fn=os.setsid)
print(output)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
except subprocess.CalledProcessError as e:
print("Execution failed:", e, file=sys.stderr)
print(" Cmd:", e.cmd, file=sys.stderr)
print(" Args:", e.args, file=sys.stderr)
print(" Return code:", e.returncode, file=sys.stderr)
print(" Output message:", e.output, file=sys.stderr)
except subprocess.TimeoutExpired as e:
print("Execution stopped:", e, file=sys.stderr)
print(" Cmd:", e.cmd, file=sys.stderr)
print(" Args:", e.args, file=sys.stderr)
print(" Output message:", e.output, file=sys.stderr)
print(" Timeout:", e.timeout, file=sys.stderr)
def main():
"""Main function"""
    # subprocess.check_output is a convenience function (a wrapper).
# For more advanced use cases, the underlying subprocess.Popen interface
# can be used directly.
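    # A rough Popen equivalent of the check_output call in execute() above
    # (a sketch, an addition to the original script; kept commented out so the
    # tests below behave unchanged; 'args' stands for the command list):
    #
    #   proc = subprocess.Popen(args, stdout=subprocess.PIPE,
    #                           stderr=subprocess.STDOUT,
    #                           universal_newlines=True)
    #   output, _ = proc.communicate(timeout=TIMEOUT)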
# Test 1
print("TEST1")
execute(["ls", "."])
print()
# Test 2
print("TEST2")
execute(["ls", "unknown_file"])
print()
# Test 3
print("TEST3")
execute(["unknown_cmd"])
print()
# Test 4
print("TEST4")
execute(["sleep", "10"])
if __name__ == '__main__':
main()
|
zmike/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/memorizingfile.py
|
680
|
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Memorizing file.
A memorizing file wraps a file and memorizes lines read by readline.
"""
import sys
class MemorizingFile(object):
"""MemorizingFile wraps a file and memorizes lines read by readline.
Note that data read by other methods are not memorized. This behavior
is good enough for memorizing lines SimpleHTTPServer reads before
the control reaches WebSocketRequestHandler.
"""
def __init__(self, file_, max_memorized_lines=sys.maxint):
"""Construct an instance.
Args:
file_: the file object to wrap.
max_memorized_lines: the maximum number of lines to memorize.
Only the first max_memorized_lines are memorized.
Default: sys.maxint.
"""
self._file = file_
self._memorized_lines = []
self._max_memorized_lines = max_memorized_lines
self._buffered = False
self._buffered_line = None
def __getattribute__(self, name):
if name in ('_file', '_memorized_lines', '_max_memorized_lines',
'_buffered', '_buffered_line', 'readline',
'get_memorized_lines'):
return object.__getattribute__(self, name)
return self._file.__getattribute__(name)
def readline(self, size=-1):
"""Override file.readline and memorize the line read.
        Note that even if size is specified and smaller than the actual size,
        the whole line will be read out from the underlying file object by
        subsequent readline calls.
"""
if self._buffered:
line = self._buffered_line
self._buffered = False
else:
line = self._file.readline()
if line and len(self._memorized_lines) < self._max_memorized_lines:
self._memorized_lines.append(line)
if size >= 0 and size < len(line):
self._buffered = True
self._buffered_line = line[size:]
return line[:size]
return line
def get_memorized_lines(self):
"""Get lines memorized so far."""
return self._memorized_lines
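# A minimal usage sketch (an addition, not part of the original module): wrap
# an in-memory file and replay the lines that readline() has already returned.
if __name__ == '__main__':
    from StringIO import StringIO  # this module targets Python 2 (see sys.maxint)
    f = MemorizingFile(StringIO('GET / HTTP/1.1\r\nHost: example\r\n\r\n'))
    f.readline()       # 'GET / HTTP/1.1\r\n'
    f.readline(4)      # 'Host' -- the rest of the line is buffered
    f.readline()       # ': example\r\n' -- served from the buffer
    print f.get_memorized_lines()  # the two full lines, in read order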
# vi:sts=4 sw=4 et
|
sdague/home-assistant
|
refs/heads/dev
|
homeassistant/components/blebox/sensor.py
|
16
|
"""BleBox sensor entities."""
from homeassistant.helpers.entity import Entity
from . import BleBoxEntity, create_blebox_entities
from .const import BLEBOX_TO_HASS_DEVICE_CLASSES, BLEBOX_TO_UNIT_MAP
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a BleBox entry."""
create_blebox_entities(
hass, config_entry, async_add_entities, BleBoxSensorEntity, "sensors"
)
class BleBoxSensorEntity(BleBoxEntity, Entity):
"""Representation of a BleBox sensor feature."""
@property
def state(self):
"""Return the state."""
return self._feature.current
@property
def unit_of_measurement(self):
"""Return the unit."""
return BLEBOX_TO_UNIT_MAP[self._feature.unit]
@property
def device_class(self):
"""Return the device class."""
return BLEBOX_TO_HASS_DEVICE_CLASSES[self._feature.device_class]
|
josyb/myhdl
|
refs/heads/master
|
myhdl/test/conversion/toVerilog2/test_loops.py
|
4
|
import os
path = os.path
from random import randrange
import myhdl
from myhdl import *
from myhdl.conversion import verify, analyze
from myhdl import ConversionError
from myhdl.conversion._misc import _error
def ForLoopError1(a, out):
@instance
def logic():
while 1:
yield a
var = 0
for i in range(1, 4, -1):
if a[i] == 1:
var += 1
out.next = var
return logic
def LoopBench(LoopTest):
a = Signal(intbv(-1)[16:])
z = Signal(intbv(0)[16:])
looptest_inst = LoopTest(a, z)
data = tuple([randrange(2**min(i, 16)) for i in range(100)])
@instance
def stimulus():
for i in range(100):
a.next = data[i]
yield delay(10)
print(z)
return stimulus, looptest_inst
def testForLoopError1():
try:
analyze(LoopBench, ForLoopError1)
except ConversionError as e:
assert e.kind == _error.Requirement
else:
assert False
|
bootphon/h5features
|
refs/heads/master
|
h5features/__init__.py
|
1
|
# Copyright 2014-2019 Thomas Schatz, Mathieu Bernard, Roland Thiolliere
#
# This file is part of h5features.
#
# h5features is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# h5features is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with h5features. If not, see <http://www.gnu.org/licenses/>.
"""This package defines a standard to read/write features from/to HDF5 files.
.. note::
    **The functions are neither concurrent nor thread-safe**
    because the HDF5 library is not concurrent and not always
    thread-safe. Moreover, they are not even atomic for independent
    processes (because there are several independent calls to the
    file system), so thread-safety and atomicity of operations
    should be enforced externally when necessary.
"""
from .h5features import read
from .h5features import write
from .h5features import simple_write
from .converter import Converter
from .data import Data
from .reader import Reader
from .writer import Writer
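# A minimal usage sketch (an addition; the argument order follows the upstream
# documentation and should be checked against the installed version):
#
#   import numpy as np
#   import h5features
#   items = ['utt1']
#   times = [np.arange(10) * 0.01]        # one timestamp per frame
#   features = [np.random.rand(10, 39)]   # nframes x ndims for each item
#   h5features.write('feats.h5', 'group1', items, times, features)
#   times_read, features_read = h5features.read('feats.h5', 'group1')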
|
omni-digital/omni-blocks
|
refs/heads/master
|
tests/test_templatetags.py
|
1
|
from __future__ import unicode_literals
from mock import Mock
from django.test import TestCase
from wagtail_factories import PageFactory
from omni_blocks.blocks.text_blocks import JumpHBlock
from omni_blocks.templatetags.omni_blocks_tags import has_jumplist, get_jumplist
class TestHasJumplist(TestCase):
def setUp(self):
self.page = PageFactory.create(title="Page", parent=None)
def test_jumplist_does_not_exists(self):
"""has_jumplist should return `False` if no JumpHBlock exists."""
self.assertFalse(has_jumplist(self.page, "body"))
def test_jumplist_exists(self):
"""Ensure has_jumplist returns `True` when a JumpHBlock exists."""
field = Mock(block=JumpHBlock(tag="h2"))
self.page.body = [field]
self.assertTrue(has_jumplist(self.page, "body"))
class TestGetJumplist(TestCase):
def setUp(self):
self.page = PageFactory.create(title="Page", parent=None)
def test_no_jumplist_exists(self):
"""Ensure get_jumplist returns an empty list if no JumpHBlock exists."""
result = get_jumplist(self.page, "body")
self.assertEqual(len(result), 0)
def test_jumplist_exists(self):
"""Ensure get_jumplist returns the JumpHBlocks."""
field = Mock(block=JumpHBlock(tag="h2"))
self.page.body = [field, field, field]
result = get_jumplist(self.page, "body")
self.assertEqual(len(result), 3)
|
RaspberryPiFi/PiCode
|
refs/heads/master
|
lib/eyed3/plugins/__init__.py
|
3
|
# -*- coding: utf-8 -*-
################################################################################
# Copyright (C) 2012 Travis Shirk <travis@pobox.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
from __future__ import print_function
import os, sys, logging, types
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from eyed3 import core, utils
from eyed3.utils.console import printMsg, printError
_PLUGINS = {}
log = logging.getLogger(__name__)
def load(name=None, reload=False, paths=None):
'''Returns the eyed3.plugins.Plugin *class* identified by ``name``.
If ``name`` is ``None`` then the full list of plugins is returned.
    Once a plugin is loaded its class object is cached, and future calls to
    this function will return the cached version. Use ``reload=True`` to
    refresh the cache.'''
global _PLUGINS
if len(list(_PLUGINS.keys())) and not reload:
# Return from the cache if possible
try:
return _PLUGINS[name] if name else _PLUGINS
except KeyError:
            # It's not in the cache; look again and refresh the cache
_PLUGINS = {}
else:
_PLUGINS = {}
def _isValidModule(f, d):
'''Determine if file ``f`` is a valid module file name.'''
        # 1) it is a file
        # 2) does not start with '_' or '.'
        # 3) it ends with .py (avoids the .pyc duplicate)
return bool(os.path.isfile(os.path.join(d, f))
and f[0] not in ('_', '.')
and f.endswith(".py"))
log.debug("Extra plugin paths: %s" % paths)
for d in [os.path.dirname(__file__)] + (paths if paths else []):
log.debug("Searching '%s' for plugins", d)
if not os.path.isdir(d):
continue
if d not in sys.path:
sys.path.append(d)
try:
for f in os.listdir(d):
if not _isValidModule(f, d):
continue
mod_name = os.path.splitext(f)[0]
try:
mod = __import__(mod_name, globals=globals(),
locals=locals())
except ImportError as ex:
log.warning("Plugin '%s' requires packages that are not "
"installed: %s" % ((f, d), ex))
continue
except Exception as ex:
log.exception("Bad plugin '%s'", (f, d))
continue
for attr in [getattr(mod, a) for a in dir(mod)]:
if (type(attr) == type and issubclass(attr, Plugin)):
                        # This is an eyed3.plugins.Plugin
PluginClass = attr
if (PluginClass not in list(_PLUGINS.values()) and
len(PluginClass.NAMES)):
log.debug("loading plugin '%s' from '%s%s%s'",
mod, d, os.path.sep, f)
                            # Register the main name first so there is always
                            # at least one entry; otherwise looking the plugin
                            # up below would raise a KeyError.
main_name = PluginClass.NAMES[0]
_PLUGINS[main_name] = PluginClass
for alias in PluginClass.NAMES[1:]:
# Add alternate names
_PLUGINS[alias] = PluginClass
                            # If the requested plugin is found, return it immediately
if name and name in PluginClass.NAMES:
return PluginClass
finally:
if d in sys.path:
sys.path.remove(d)
log.debug("Plugins loaded: %s", _PLUGINS)
if name:
# If a specific plugin was requested and we've not returned yet...
return None
return _PLUGINS
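# A minimal usage sketch of load() (an addition; "classic" is eyeD3's default
# plugin name and is assumed here for illustration):
#
#   all_plugins = load()           # dict mapping every NAME/alias to its class
#   classic = load("classic")      # a single class, or None if unknown
#   plugins = load(reload=True)    # discard the cache and rescan the plugin dirs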
class Plugin(utils.FileHandler):
'''Base class for all eyeD3 plugins'''
SUMMARY = u"eyeD3 plugin"
'''One line about the plugin'''
DESCRIPTION = u""
'''Detailed info about the plugin'''
NAMES = []
    '''A list of **at least** one name for invoking the plugin; values [1:]
    are treated as aliases'''
def __init__(self, arg_parser):
self.arg_parser = arg_parser
self.arg_group = arg_parser.add_argument_group("Plugin options",
"%s\n%s" % (self.SUMMARY,
self.DESCRIPTION))
def start(self, args, config):
        '''Called after command line parsing but before any paths are
        processed. The ``self.args`` attribute (the parsed command line) and
        ``self.config`` (the user config, if any) are set here.'''
self.args = args
self.config = config
def handleFile(self, f):
pass
def handleDone(self):
pass
class LoaderPlugin(Plugin):
'''A base class that provides auto loading of audio files'''
def __init__(self, arg_parser, cache_files=False):
'''Constructor. If ``cache_files`` is True (off by default) then each
AudioFile is appended to ``_file_cache`` during ``handleFile`` and
the list is cleared by ``handleDirectory``.'''
super(LoaderPlugin, self).__init__(arg_parser)
self._num_loaded = 0
self._file_cache = [] if cache_files else None
def handleFile(self, f, *args, **kwargs):
'''Loads ``f`` and sets ``self.audio_file`` to an instance of
:class:`eyed3.core.AudioFile` or ``None`` if an error occurred or the
file is not a recognized type.
The ``*args`` and ``**kwargs`` are passed to :func:`eyed3.core.load`.
'''
self.audio_file = None
try:
self.audio_file = core.load(f, *args, **kwargs)
except NotImplementedError as ex:
# Frame decryption, for instance...
printError(str(ex))
return
if self.audio_file:
self._num_loaded += 1
if self._file_cache is not None:
self._file_cache.append(self.audio_file)
def handleDirectory(self, d, _):
        '''Override to make use of ``self._file_cache``. By default the list
        is cleared; subclasses should consider doing the same, otherwise every
        AudioFile will be cached.'''
if self._file_cache is not None:
self._file_cache = []
def handleDone(self):
'''If no audio files were loaded this simply prints "Nothing to do".'''
if self._num_loaded == 0:
printMsg("Nothing to do")
|
dhanunjaya/neutron
|
refs/heads/master
|
neutron/cmd/eventlet/server/__init__.py
|
10
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.server import rpc_eventlet
from neutron.server import wsgi_eventlet
from neutron.server import wsgi_pecan
def main_wsgi_eventlet():
wsgi_eventlet.main()
# Eventlet patching is not required for Pecan, but some plugins still spawn
# eventlet threads
def main_wsgi_pecan():
wsgi_pecan.main()
def main_rpc_eventlet():
rpc_eventlet.main()
|
xingyepei/edx-platform
|
refs/heads/release
|
lms/djangoapps/instructor/tests/test_offline_gradecalc.py
|
74
|
"""
Tests for offline_gradecalc.py
"""
import json
from mock import patch
from courseware.models import OfflineComputedGrade
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.graders import Score
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..offline_gradecalc import offline_grade_calculation, student_grades
def mock_grade(_student, _request, course, **_kwargs):
""" Return some fake grade data to mock grades.grade() """
return {
'grade': u'Pass',
'totaled_scores': {
u'Homework': [
Score(earned=10.0, possible=10.0, graded=True, section=u'Subsection 1', module_id=None),
]
},
'percent': 0.85,
'raw_scores': [
Score(
earned=5.0, possible=5.0, graded=True, section=u'Numerical Input',
module_id=course.id.make_usage_key('problem', 'problem1'),
),
Score(
earned=5.0, possible=5.0, graded=True, section=u'Multiple Choice',
module_id=course.id.make_usage_key('problem', 'problem2'),
),
],
'section_breakdown': [
{'category': u'Homework', 'percent': 1.0, 'detail': u'Homework 1 - Test - 100% (10/10)', 'label': u'HW 01'},
{'category': u'Final Exam', 'prominent': True, 'percent': 0, 'detail': u'Final = 0%', 'label': u'Final'}
],
'grade_breakdown': [
{'category': u'Homework', 'percent': 0.85, 'detail': u'Homework = 85.00% of a possible 85.00%'},
{'category': u'Final Exam', 'percent': 0.0, 'detail': u'Final Exam = 0.00% of a possible 15.00%'}
]
}
class TestOfflineGradeCalc(ModuleStoreTestCase):
""" Test Offline Grade Calculation with some mocked grades """
def setUp(self):
super(TestOfflineGradeCalc, self).setUp()
with modulestore().default_store(ModuleStoreEnum.Type.split): # Test with split b/c old mongo keys are messy
self.course = CourseFactory.create()
self.user = UserFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
patcher = patch('courseware.grades.grade', new=mock_grade)
patcher.start()
self.addCleanup(patcher.stop)
def test_output(self):
offline_grades = OfflineComputedGrade.objects
self.assertEqual(offline_grades.filter(user=self.user, course_id=self.course.id).count(), 0)
offline_grade_calculation(self.course.id)
result = offline_grades.get(user=self.user, course_id=self.course.id)
decoded = json.loads(result.gradeset)
self.assertEqual(decoded['grade'], "Pass")
self.assertEqual(decoded['percent'], 0.85)
self.assertEqual(decoded['totaled_scores'], {
"Homework": [
{"earned": 10.0, "possible": 10.0, "graded": True, "section": "Subsection 1", "module_id": None}
]
})
self.assertEqual(decoded['raw_scores'], [
{
"earned": 5.0,
"possible": 5.0,
"graded": True,
"section": "Numerical Input",
"module_id": unicode(self.course.id.make_usage_key('problem', 'problem1')),
},
{
"earned": 5.0,
"possible": 5.0,
"graded": True,
"section": "Multiple Choice",
"module_id": unicode(self.course.id.make_usage_key('problem', 'problem2')),
}
])
self.assertEqual(decoded['section_breakdown'], [
{"category": "Homework", "percent": 1.0, "detail": "Homework 1 - Test - 100% (10/10)", "label": "HW 01"},
{"category": "Final Exam", "label": "Final", "percent": 0, "detail": "Final = 0%", "prominent": True}
])
self.assertEqual(decoded['grade_breakdown'], [
{"category": "Homework", "percent": 0.85, "detail": "Homework = 85.00% of a possible 85.00%"},
{"category": "Final Exam", "percent": 0.0, "detail": "Final Exam = 0.00% of a possible 15.00%"}
])
def test_student_grades(self):
""" Test that the data returned by student_grades() and grades.grade() match """
offline_grade_calculation(self.course.id)
with patch('courseware.grades.grade', side_effect=AssertionError('Should not re-grade')):
result = student_grades(self.user, None, self.course, use_offline=True)
self.assertEqual(result, mock_grade(self.user, None, self.course))
|
digitalocean/netbox
|
refs/heads/develop
|
netbox/dcim/migrations/0064_remove_platform_rpc_client.py
|
2
|
# Generated by Django 2.0.8 on 2018-08-22 16:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dcim', '0063_device_local_context_data'),
]
operations = [
migrations.RemoveField(
model_name='platform',
name='rpc_client',
),
]
|
citizenline/citizenline
|
refs/heads/master
|
bettertexts/app_settings.py
|
2
|
from django.conf import settings
STAR_RATINGS_RANGE = getattr(settings, "STAR_RATINGS_RANGE", 5)
|
MDPvis/rlpy
|
refs/heads/master
|
examples/hiv/kifdd.py
|
4
|
from rlpy.Domains.HIVTreatment import HIVTreatment
from rlpy.Agents import Q_Learning
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
from rlpy.Representations import KernelizediFDD
param_space = {
'kernel_resolution':
hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
'discover_threshold':
hp.loguniform(
"discover_threshold",
np.log(1e4),
np.log(1e8)),
'lambda_': hp.uniform("lambda_", 0., 1.),
'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
discover_threshold=8948708.75,
boyan_N0=627.12,
lambda_=0.5433,
initial_learn_rate=0.59812,
kernel_resolution=24.340):
opt = {}
opt["path"] = path
opt["exp_id"] = exp_id
opt["max_steps"] = 150000
opt["num_policy_checks"] = 30
opt["checks_per_policy"] = 1
active_threshold = 0.01
max_base_feat_sim = 0.5
sparsify = 1
domain = HIVTreatment()
opt["domain"] = domain
# domain = FiniteCartPoleBalanceModern()
kernel_width = (domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]) \
/ kernel_resolution
representation = KernelizediFDD(domain, sparsify=sparsify,
kernel=gaussian_kernel,
kernel_args=[kernel_width],
active_threshold=active_threshold,
discover_threshold=discover_threshold,
normalization=True,
max_active_base_feat=10,
max_base_feat_sim=max_base_feat_sim)
policy = eGreedy(representation, epsilon=0.1)
# agent = SARSA(representation,policy,domain,initial_learn_rate=initial_learn_rate,
# lambda_=.0, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
opt["agent"] = Q_Learning(
policy, representation, discount_factor=domain.discount_factor,
lambda_=lambda_, initial_learn_rate=initial_learn_rate,
learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
experiment = Experiment(**opt)
return experiment
if __name__ == '__main__':
from rlpy.Tools.run import run_profiled
run_profiled(make_experiment)
#experiment = make_experiment(1)
# experiment.run(visualize_learning=True)
# experiment.plot()
# experiment.save()
|
ken-muturi/pombola
|
refs/heads/master
|
pombola/south_africa/management/commands/south_africa_restart_constituency_contacts.py
|
4
|
from datetime import date
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import PositionTitle
# A few days before the election:
date_for_last_active_check = date(2014, 5, 1)
# The date of the final results being announced:
date_to_start_new_positions = date(2014, 5, 10)
class Command(NoArgsCommand):
"""Restart constituency contact positions for re-elected MPs and MPLs"""
help = 'Restart constituency contact positions for re-elected MPs and MPLs'
option_list = NoArgsCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
help='Actually update the database'),)
def handle_noargs(self, **options):
pt = PositionTitle.objects.get(name='Constituency Contact')
for old_position in pt.position_set.all(). \
currently_active(date_for_last_active_check):
person = old_position.person
print "Considering", old_position
active_positions = person.position_set.all().currently_active()
# Are they currently an MP or an MPL?
na_memberships = active_positions.filter(
organisation__slug='national-assembly',
title__slug='member')
# FIXME: Why are there two representations of MPLs?
pl_memberships = active_positions.filter(
title__slug='member',
organisation__kind__slug='provincial-legislature')
pl_memberships2 = active_positions.filter(
title__slug='member-of-the-provincial-legislature')
restart = False
if na_memberships:
print " Restarting because", person, "is currently a Member of the National Assembly"
restart = True
if pl_memberships or pl_memberships2:
print " Restarting because", person, "is currently a Member of a Provincial Legislature"
restart = True
if restart:
# Set the primary key to None so that when we save it,
# that creates a new row:
old_position.pk = None
old_position.start_date = ApproximateDate(
*date_to_start_new_positions.timetuple()[0:3]
)
old_position.end_date = ApproximateDate(future=True)
if options['commit']:
print " Saving the new position"
old_position.save()
else:
print " Not saving the new position (--commit not specified)"
|
Technorip/Myntra
|
refs/heads/master
|
Django Backend/myntra/env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euctwfreq.py
|
3132
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# The typical distribution ratio is about 25% of the ideal one, still much higher than RDR
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char to FreqOrder table
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
# Everything below is of no interest for detection purposes
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
|
elit3ge/SickRage
|
refs/heads/master
|
lib/github/InputGitAuthor.py
|
25
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
class InputGitAuthor(object):
"""
"""
def __init__(self, name, email, date):
"""
:param name: string
:param email: string
:param date: string
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(email, (str, unicode)), email
assert isinstance(date, (str, unicode)), date # @todo Datetime?
self.__name = name
self.__email = email
self.__date = date
@property
def _identity(self):
return {
"name": self.__name,
"email": self.__email,
"date": self.__date,
}
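# --- Illustrative usage sketch (hypothetical values; not part of the original
# PyGithub file). The _identity dict above is what gets serialized into the
# "author"/"committer" fields of git data payloads.
if __name__ == "__main__":
    demo = InputGitAuthor("Jane Doe", "jane@example.com", "2013-04-10T14:00:00Z")
    print(demo._identity)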
|
2014c2g3/0623exam
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/_socket_1.py
|
742
|
"""Implementation module for socket operations.
See the socket module for documentation."""
AF_APPLETALK = 16
AF_DECnet = 12
AF_INET = 2
AF_INET6 = 23
AF_IPX = 6
AF_IRDA = 26
AF_SNA = 11
AF_UNSPEC = 0
AI_ADDRCONFIG = 1024
AI_ALL = 256
AI_CANONNAME = 2
AI_NUMERICHOST = 4
AI_NUMERICSERV = 8
AI_PASSIVE = 1
AI_V4MAPPED = 2048
CAPI = '<capsule object "_socket.CAPI" at 0x00BC4F38>'
EAI_AGAIN = 11002
EAI_BADFLAGS = 10022
EAI_FAIL = 11003
EAI_FAMILY = 10047
EAI_MEMORY = 8
EAI_NODATA = 11001
EAI_NONAME = 11001
EAI_SERVICE = 10109
EAI_SOCKTYPE = 10044
INADDR_ALLHOSTS_GROUP = -536870911
INADDR_ANY = 0
INADDR_BROADCAST = -1
INADDR_LOOPBACK = 2130706433
INADDR_MAX_LOCAL_GROUP = -536870657
INADDR_NONE = -1
INADDR_UNSPEC_GROUP = -536870912
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPROTO_ICMP = 1
IPPROTO_IP = 0
IPPROTO_RAW = 255
IPPROTO_TCP = 6
IPPROTO_UDP = 17
IPV6_CHECKSUM = 26
IPV6_DONTFRAG = 14
IPV6_HOPLIMIT = 21
IPV6_HOPOPTS = 1
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_LOOP = 11
IPV6_PKTINFO = 19
IPV6_RECVRTHDR = 38
IPV6_RECVTCLASS = 40
IPV6_RTHDR = 32
IPV6_TCLASS = 39
IPV6_UNICAST_HOPS = 4
IPV6_V6ONLY = 27
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_HDRINCL = 2
IP_MULTICAST_IF = 9
IP_MULTICAST_LOOP = 11
IP_MULTICAST_TTL = 10
IP_OPTIONS = 1
IP_RECVDSTADDR = 25
IP_TOS = 3
IP_TTL = 4
MSG_BCAST = 1024
MSG_CTRUNC = 512
MSG_DONTROUTE = 4
MSG_MCAST = 2048
MSG_OOB = 1
MSG_PEEK = 2
MSG_TRUNC = 256
NI_DGRAM = 16
NI_MAXHOST = 1025
NI_MAXSERV = 32
NI_NAMEREQD = 4
NI_NOFQDN = 1
NI_NUMERICHOST = 2
NI_NUMERICSERV = 8
RCVALL_MAX = 3
RCVALL_OFF = 0
RCVALL_ON = 1
RCVALL_SOCKETLEVELONLY = 2
SHUT_RD = 0
SHUT_RDWR = 2
SHUT_WR = 1
SIO_KEEPALIVE_VALS = 2550136836
SIO_RCVALL = 2550136833
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SOCK_STREAM = 1
SOL_IP = 0
SOL_SOCKET = 65535
SOL_TCP = 6
SOL_UDP = 17
SOMAXCONN = 2147483647
SO_ACCEPTCONN = 2
SO_BROADCAST = 32
SO_DEBUG = 1
SO_DONTROUTE = 16
SO_ERROR = 4103
SO_EXCLUSIVEADDRUSE = -5
SO_KEEPALIVE = 8
SO_LINGER = 128
SO_OOBINLINE = 256
SO_RCVBUF = 4098
SO_RCVLOWAT = 4100
SO_RCVTIMEO = 4102
SO_REUSEADDR = 4
SO_SNDBUF = 4097
SO_SNDLOWAT = 4099
SO_SNDTIMEO = 4101
SO_TYPE = 4104
SO_USELOOPBACK = 64
class SocketType:
pass
TCP_MAXSEG = 4
TCP_NODELAY = 1
__loader__ = '<_frozen_importlib.ExtensionFileLoader object at 0x00CA2D90>'
def dup(*args,**kw):
"""dup(integer) -> integer
Duplicate an integer socket file descriptor. This is like os.dup(), but for
sockets; on some platforms os.dup() won't work for socket file descriptors."""
pass
class error:
pass
class gaierror:
pass
def getaddrinfo(*args,**kw):
"""getaddrinfo(host, port [, family, socktype, proto, flags]) -> list of (family, socktype, proto, canonname, sockaddr)
Resolve host and port into addrinfo struct."""
pass
def getdefaulttimeout(*args,**kw):
"""getdefaulttimeout() -> timeout
Returns the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None."""
pass
def gethostbyaddr(*args,**kw):
"""gethostbyaddr(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number."""
pass
def gethostbyname(*args,**kw):
"""gethostbyname(host) -> address
Return the IP address (a string of the form '255.255.255.255') for a host."""
pass
def gethostbyname_ex(*args,**kw):
"""gethostbyname_ex(host) -> (name, aliaslist, addresslist)
Return the true host name, a list of aliases, and a list of IP addresses,
for a host. The host argument is a string giving a host name or IP number."""
pass
def gethostname(*args,**kw):
"""gethostname() -> string
Return the current host name."""
pass
def getnameinfo(*args,**kw):
"""getnameinfo(sockaddr, flags) --> (host, port)
Get host and port for a sockaddr."""
pass
def getprotobyname(*args,**kw):
"""getprotobyname(name) -> integer
Return the protocol number for the named protocol. (Rarely used.)"""
pass
def getservbyname(*args,**kw):
"""getservbyname(servicename[, protocolname]) -> integer
Return a port number from a service name and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match."""
pass
def getservbyport(*args,**kw):
"""getservbyport(port[, protocolname]) -> string
Return the service name from a port number and protocol name.
The optional protocol name, if given, should be 'tcp' or 'udp',
otherwise any protocol will match."""
pass
has_ipv6 = True
class herror:
pass
def htonl(*args,**kw):
"""htonl(integer) -> integer
Convert a 32-bit integer from host to network byte order."""
pass
def htons(*args,**kw):
"""htons(integer) -> integer
Convert a 16-bit integer from host to network byte order."""
pass
def inet_aton(*args,**kw):
"""inet_aton(string) -> bytes giving packed 32-bit IP representation
Convert an IP address in string format (123.45.67.89) to the 32-bit packed
binary format used in low-level network functions."""
pass
def inet_ntoa(*args,**kw):
"""inet_ntoa(packed_ip) -> ip_address_string
Convert an IP address from 32-bit packed binary format to string format"""
pass
def ntohl(*args,**kw):
"""ntohl(integer) -> integer
Convert a 32-bit integer from network to host byte order."""
pass
def ntohs(*args,**kw):
"""ntohs(integer) -> integer
Convert a 16-bit integer from network to host byte order."""
pass
def setdefaulttimeout(*args,**kw):
"""setdefaulttimeout(timeout)
Set the default timeout in seconds (float) for new socket objects.
A value of None indicates that new socket objects have no timeout.
When the socket module is first imported, the default is None."""
pass
class socket:
def __init__(self,*args,**kw):
pass
def bind(self,*args,**kw):
pass
def close(self):
pass
class timeout:
pass
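# --- Illustrative sketch (not part of the original stub module): what a real
# htons() computes, expressed portably with the struct module. Pack the value
# in native ("=") order, then reinterpret the bytes as network/big-endian ("!").
if __name__ == "__main__":
    import struct
    host_value = 0x1234
    network_value = struct.unpack("!H", struct.pack("=H", host_value))[0]
    print(hex(network_value))  # 0x3412 on little-endian hosts, 0x1234 on big-endian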
|
richard-fisher/repository
|
refs/heads/master
|
programming/cython/actions.py
|
4
|
#!/usr/bin/python
from pisi.actionsapi import pythonmodules
def install():
pythonmodules.install()
|
sgraham/nope
|
refs/heads/master
|
v8/test/intl/testcfg.py
|
75
|
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from testrunner.local import testsuite
from testrunner.objects import testcase
class IntlTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(IntlTestSuite, self).__init__(name, root)
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(self.root):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
dirs.sort()
files.sort()
for filename in files:
if (filename.endswith(".js") and filename != "assert.js" and
filename != "utils.js"):
testname = os.path.join(dirname[len(self.root) + 1:], filename[:-3])
test = testcase.TestCase(self, testname)
tests.append(test)
return tests
def GetFlagsForTestCase(self, testcase, context):
flags = ["--allow-natives-syntax"] + context.mode_flags
files = []
files.append(os.path.join(self.root, "assert.js"))
files.append(os.path.join(self.root, "utils.js"))
files.append(os.path.join(self.root, testcase.path + self.suffix()))
flags += files
if context.isolates:
flags.append("--isolate")
flags += files
return testcase.flags + flags
def GetSuite(name, root):
return IntlTestSuite(name, root)
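# Illustrative sketch (hypothetical paths; not part of the original file): for a
# test named "date-format", GetFlagsForTestCase above yields roughly
#     ['--allow-natives-syntax', <mode flags>,
#      '<root>/assert.js', '<root>/utils.js', '<root>/date-format.js']
# plus '--isolate' and a second copy of the file list when context.isolates is set.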
|
dahlstrom-g/intellij-community
|
refs/heads/master
|
python/testData/inspections/AddCallSuper1_after.py
|
74
|
class A:
def __init__(self, c, a = 5):
pass
class B(A):
def __init__(self, r, c, b=6):
"""docstring"""
A.__init__(self, c)
print "Constructor B was called"
|
nikolas/raven-python
|
refs/heads/master
|
raven/contrib/django/views.py
|
25
|
"""
raven.contrib.django.views
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from functools import wraps
from django.conf import settings
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from raven.contrib.django.models import client
from raven.utils import json
from raven.utils import six
def is_valid_origin(origin):
if not settings.SENTRY_ALLOW_ORIGIN:
return False
if settings.SENTRY_ALLOW_ORIGIN == '*':
return True
if not origin:
return False
origin = origin.lower()
for value in settings.SENTRY_ALLOW_ORIGIN:
if isinstance(value, six.string_types):
if value.lower() == origin:
return True
else:
if value.match(origin):
return True
return False
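# Illustrative configuration sketch (hypothetical values; not part of the
# original file): SENTRY_ALLOW_ORIGIN may be the wildcard '*', or an iterable
# mixing plain origin strings with compiled patterns, e.g.
#
#     import re
#     SENTRY_ALLOW_ORIGIN = [
#         'https://example.com',
#         re.compile(r'https://.*\.example\.org$'),
#     ]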
def with_origin(func):
@wraps(func)
def wrapped(request, *args, **kwargs):
origin = request.META.get('HTTP_ORIGIN')
if not is_valid_origin(origin):
return HttpResponseForbidden()
response = func(request, *args, **kwargs)
response['Access-Control-Allow-Origin'] = origin
response['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
return response
return wrapped
def extract_auth_vars(request):
"""
raven-js will pass both Authorization and X-Sentry-Auth depending on the browser
and server configurations.
"""
if request.META.get('HTTP_X_SENTRY_AUTH', '').startswith('Sentry'):
return request.META['HTTP_X_SENTRY_AUTH']
elif request.META.get('HTTP_AUTHORIZATION', '').startswith('Sentry'):
return request.META['HTTP_AUTHORIZATION']
else:
# Try to construct from GET request
args = [
'%s=%s' % i
for i in request.GET.items()
if i[0].startswith('sentry_') and i[0] != 'sentry_data'
]
if args:
return 'Sentry %s' % ', '.join(args)
return None
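# Illustrative result sketch (hypothetical request; not part of the original
# file): a GET such as /report/?sentry_version=4&sentry_key=abc&sentry_data=...
# carries no auth headers, so the header is rebuilt from the query string as
#     'Sentry sentry_version=4, sentry_key=abc'
# with sentry_data deliberately excluded.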
@csrf_exempt
@require_http_methods(['GET', 'POST', 'OPTIONS'])
@never_cache
@with_origin
def report(request, project_id=None):
if request.method == 'OPTIONS':
return HttpResponse()
if request.method == 'POST':
if hasattr(request, 'body'):
data = request.body
else:
data = request.raw_post_data
else:
data = request.GET.get('sentry_data')
if not data:
return HttpResponseBadRequest()
try:
decoded = json.loads(data.decode('utf8'))
except json.JSONDecodeError:
return HttpResponseBadRequest()
client.send(auth_header=extract_auth_vars(request), **decoded)
return HttpResponse()
|
OpenUpgrade/OpenUpgrade
|
refs/heads/8.0
|
addons/purchase_requisition/purchase_requisition.py
|
200
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
class purchase_requisition(osv.osv):
_name = "purchase.requisition"
_description = "Purchase Requisition"
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _get_po_line(self, cr, uid, ids, field_names, arg=None, context=None):
result = dict((res_id, []) for res_id in ids)
for element in self.browse(cr, uid, ids, context=context):
for po in element.purchase_ids:
result[element.id] += [po_line.id for po_line in po.order_line]
return result
_columns = {
'name': fields.char('Call for Bids Reference', required=True, copy=False),
'origin': fields.char('Source Document'),
'ordering_date': fields.date('Scheduled Ordering Date'),
'date_end': fields.datetime('Bid Submission Deadline'),
        'schedule_date': fields.date('Scheduled Date', select=True, help="The expected and scheduled date when all the products are received"),
'user_id': fields.many2one('res.users', 'Responsible'),
        'exclusive': fields.selection([('exclusive', 'Select only one RFQ (exclusive)'), ('multiple', 'Select multiple RFQ')], 'Bid Selection Type', required=True, help="Select only one RFQ (exclusive): on confirmation of a purchase order, the remaining purchase orders are cancelled.\nSelect multiple RFQ: it allows having multiple purchase orders. On confirmation of a purchase order, the remaining orders are not cancelled"),
'description': fields.text('Description'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'purchase_ids': fields.one2many('purchase.order', 'requisition_id', 'Purchase Orders', states={'done': [('readonly', True)]}),
'po_line_ids': fields.function(_get_po_line, method=True, type='one2many', relation='purchase.order.line', string='Products by supplier'),
'line_ids': fields.one2many('purchase.requisition.line', 'requisition_id', 'Products to Purchase', states={'done': [('readonly', True)]}, copy=True),
'procurement_id': fields.many2one('procurement.order', 'Procurement', ondelete='set null', copy=False),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
'state': fields.selection([('draft', 'Draft'), ('in_progress', 'Confirmed'),
('open', 'Bid Selection'), ('done', 'PO Created'),
('cancel', 'Cancelled')],
'Status', track_visibility='onchange', required=True,
copy=False),
'multiple_rfq_per_supplier': fields.boolean('Multiple RFQ per supplier'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', required=True),
}
def _get_picking_in(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
return obj_data.get_object_reference(cr, uid, 'stock', 'picking_type_in')[1]
_defaults = {
'state': 'draft',
'exclusive': 'multiple',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition', context=c),
'user_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).id,
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'purchase.order.requisition'),
'picking_type_id': _get_picking_in,
}
def tender_cancel(self, cr, uid, ids, context=None):
purchase_order_obj = self.pool.get('purchase.order')
# try to set all associated quotations to cancel state
for tender in self.browse(cr, uid, ids, context=context):
for purchase_order in tender.purchase_ids:
purchase_order_obj.action_cancel(cr, uid, [purchase_order.id], context=context)
purchase_order_obj.message_post(cr, uid, [purchase_order.id], body=_('Cancelled by the tender associated to this quotation.'), context=context)
return self.write(cr, uid, ids, {'state': 'cancel'})
def tender_in_progress(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'in_progress'}, context=context)
def tender_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
def tender_reset(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'draft'})
for p_id in ids:
# Deleting the existing instance of workflow for PO
self.delete_workflow(cr, uid, [p_id])
self.create_workflow(cr, uid, [p_id])
return True
def tender_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def open_product_line(self, cr, uid, ids, context=None):
""" This opens product line view to view all lines from the different quotations, groupby default by product and partner to show comparaison
between supplier price
@return: the product line tree view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'purchase_requisition', 'purchase_line_tree', context=context)
res['context'] = context
po_lines = self.browse(cr, uid, ids, context=context)[0].po_line_ids
res['context'] = {
'search_default_groupby_product': True,
'search_default_hide_cancelled': True,
'tender_id': ids[0],
}
res['domain'] = [('id', 'in', [line.id for line in po_lines])]
return res
def open_rfq(self, cr, uid, ids, context=None):
""" This opens rfq view to view all quotations associated to the call for bids
@return: the RFQ tree view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'purchase', 'purchase_rfq', context=context)
res['context'] = context
po_ids = [po.id for po in self.browse(cr, uid, ids, context=context)[0].purchase_ids]
res['domain'] = [('id', 'in', po_ids)]
return res
def _prepare_purchase_order(self, cr, uid, requisition, supplier, context=None):
supplier_pricelist = supplier.property_product_pricelist_purchase
return {
'origin': requisition.name,
'date_order': requisition.date_end or fields.datetime.now(),
'partner_id': supplier.id,
'pricelist_id': supplier_pricelist.id,
'currency_id': supplier_pricelist and supplier_pricelist.currency_id.id or requisition.company_id.currency_id.id,
'location_id': requisition.procurement_id and requisition.procurement_id.location_id.id or requisition.picking_type_id.default_location_dest_id.id,
'company_id': requisition.company_id.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'requisition_id': requisition.id,
'notes': requisition.description,
'picking_type_id': requisition.picking_type_id.id
}
def _prepare_purchase_order_line(self, cr, uid, requisition, requisition_line, purchase_id, supplier, context=None):
if context is None:
context = {}
po_line_obj = self.pool.get('purchase.order.line')
product_uom = self.pool.get('product.uom')
product = requisition_line.product_id
default_uom_po_id = product.uom_po_id.id
ctx = context.copy()
ctx['tz'] = requisition.user_id.tz
date_order = requisition.ordering_date and fields.date.date_to_datetime(self, cr, uid, requisition.ordering_date, context=ctx) or fields.datetime.now()
qty = product_uom._compute_qty(cr, uid, requisition_line.product_uom_id.id, requisition_line.product_qty, default_uom_po_id)
supplier_pricelist = supplier.property_product_pricelist_purchase and supplier.property_product_pricelist_purchase.id or False
vals = po_line_obj.onchange_product_id(
cr, uid, [], supplier_pricelist, product.id, qty, default_uom_po_id,
supplier.id, date_order=date_order,
fiscal_position_id=supplier.property_account_position.id,
date_planned=requisition_line.schedule_date,
name=False, price_unit=False, state='draft', context=context)['value']
vals.update({
'order_id': purchase_id,
'product_id': product.id,
'account_analytic_id': requisition_line.account_analytic_id.id,
'taxes_id': [(6, 0, vals.get('taxes_id', []))],
})
return vals
def make_purchase_order(self, cr, uid, ids, partner_id, context=None):
"""
Create New RFQ for Supplier
"""
context = dict(context or {})
assert partner_id, 'Supplier should be specified'
purchase_order = self.pool.get('purchase.order')
purchase_order_line = self.pool.get('purchase.order.line')
res_partner = self.pool.get('res.partner')
supplier = res_partner.browse(cr, uid, partner_id, context=context)
res = {}
        for requisition in self.browse(cr, uid, ids, context=context):
            # Look the conflicting RFQ up explicitly; the original relied on the
            # comprehension's loop variable leaking, which reported the wrong RFQ's state.
            existing = [rfq for rfq in requisition.purchase_ids if rfq.state != 'cancel' and rfq.partner_id.id == supplier.id]
            if not requisition.multiple_rfq_per_supplier and existing:
                raise osv.except_osv(_('Warning!'), _('You already have a %s purchase order for this partner, you must cancel this purchase order to create a new quotation.') % existing[0].state)
context.update({'mail_create_nolog': True})
purchase_id = purchase_order.create(cr, uid, self._prepare_purchase_order(cr, uid, requisition, supplier, context=context), context=context)
purchase_order.message_post(cr, uid, [purchase_id], body=_("RFQ created"), context=context)
res[requisition.id] = purchase_id
for line in requisition.line_ids:
purchase_order_line.create(cr, uid, self._prepare_purchase_order_line(cr, uid, requisition, line, purchase_id, supplier, context=context), context=context)
return res
def check_valid_quotation(self, cr, uid, quotation, context=None):
"""
        Check if a quotation has had all of its order lines bid, in order to confirm it if that is the case.
        Return True if all order lines have been selected during the bidding process; otherwise return False.
args : 'quotation' must be a browse record
"""
for line in quotation.order_line:
if line.state != 'confirmed' or line.product_qty != line.quantity_bid:
return False
return True
def _prepare_po_from_tender(self, cr, uid, tender, context=None):
""" Prepare the values to write in the purchase order
created from a tender.
:param tender: the source tender from which we generate a purchase order
"""
return {'order_line': [],
'requisition_id': tender.id,
'origin': tender.name}
def _prepare_po_line_from_tender(self, cr, uid, tender, line, purchase_id, context=None):
""" Prepare the values to write in the purchase order line
created from a line of the tender.
:param tender: the source tender from which we generate a purchase order
:param line: the source tender's line from which we generate a line
:param purchase_id: the id of the new purchase
"""
return {'product_qty': line.quantity_bid,
'order_id': purchase_id}
def generate_po(self, cr, uid, ids, context=None):
"""
        Generate all purchase orders based on the selected lines; should only be called on one tender at a time
"""
po = self.pool.get('purchase.order')
poline = self.pool.get('purchase.order.line')
id_per_supplier = {}
for tender in self.browse(cr, uid, ids, context=context):
if tender.state == 'done':
                raise osv.except_osv(_('Warning!'), _('You have already generated the purchase order(s).'))
confirm = False
            # check that we have confirmed at least one line
for po_line in tender.po_line_ids:
if po_line.state == 'confirmed':
confirm = True
break
if not confirm:
raise osv.except_osv(_('Warning!'), _('You have no line selected for buying.'))
#check for complete RFQ
for quotation in tender.purchase_ids:
if (self.check_valid_quotation(cr, uid, quotation, context=context)):
#use workflow to set PO state to confirm
po.signal_workflow(cr, uid, [quotation.id], 'purchase_confirm')
#get other confirmed lines per supplier
for po_line in tender.po_line_ids:
                # only take into account confirmed lines that do not belong to an already confirmed purchase order
if po_line.state == 'confirmed' and po_line.order_id.state in ['draft', 'sent', 'bid']:
if id_per_supplier.get(po_line.partner_id.id):
id_per_supplier[po_line.partner_id.id].append(po_line)
else:
id_per_supplier[po_line.partner_id.id] = [po_line]
#generate po based on supplier and cancel all previous RFQ
ctx = dict(context or {}, force_requisition_id=True)
for supplier, product_line in id_per_supplier.items():
#copy a quotation for this supplier and change order_line then validate it
quotation_id = po.search(cr, uid, [('requisition_id', '=', tender.id), ('partner_id', '=', supplier)], limit=1)[0]
vals = self._prepare_po_from_tender(cr, uid, tender, context=context)
new_po = po.copy(cr, uid, quotation_id, default=vals, context=context)
#duplicate po_line and change product_qty if needed and associate them to newly created PO
for line in product_line:
vals = self._prepare_po_line_from_tender(cr, uid, tender, line, new_po, context=context)
poline.copy(cr, uid, line.id, default=vals, context=context)
#use workflow to set new PO state to confirm
po.signal_workflow(cr, uid, [new_po], 'purchase_confirm')
#cancel other orders
self.cancel_unconfirmed_quotations(cr, uid, tender, context=context)
#set tender to state done
self.signal_workflow(cr, uid, [tender.id], 'done')
return True
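    # Illustrative sketch of the "group confirmed lines per supplier" step
    # above, reduced to plain Python over hypothetical dicts (not OpenERP
    # records):
    #
    #     id_per_supplier = {}
    #     for line in confirmed_lines:
    #         id_per_supplier.setdefault(line['partner_id'], []).append(line)
    #
    # dict.setdefault collapses the explicit "append or create" branch used in
    # generate_po into a single expression.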
def cancel_unconfirmed_quotations(self, cr, uid, tender, context=None):
#cancel other orders
po = self.pool.get('purchase.order')
for quotation in tender.purchase_ids:
if quotation.state in ['draft', 'sent', 'bid']:
self.pool.get('purchase.order').signal_workflow(cr, uid, [quotation.id], 'purchase_cancel')
po.message_post(cr, uid, [quotation.id], body=_('Cancelled by the call for bids associated to this request for quotation.'), context=context)
return True
class purchase_requisition_line(osv.osv):
_name = "purchase.requisition.line"
_description = "Purchase Requisition Line"
_rec_name = 'product_id'
_columns = {
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok', '=', True)]),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure'),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'requisition_id': fields.many2one('purchase.requisition', 'Call for Bids', ondelete='cascade'),
'company_id': fields.related('requisition_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account',),
'schedule_date': fields.date('Scheduled Date'),
}
def onchange_product_id(self, cr, uid, ids, product_id, product_uom_id, parent_analytic_account, analytic_account, parent_date, date, context=None):
""" Changes UoM and name if product_id changes.
@param name: Name of the field
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
value = {'product_uom_id': ''}
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
value = {'product_uom_id': prod.uom_id.id, 'product_qty': 1.0}
if not analytic_account:
value.update({'account_analytic_id': parent_analytic_account})
if not date:
value.update({'schedule_date': parent_date})
return {'value': value}
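    # Illustrative result sketch (hypothetical ids; not part of the original
    # file): selecting a product returns something like
    #     {'value': {'product_uom_id': 3, 'product_qty': 1.0,
    #                'account_analytic_id': 7, 'schedule_date': '2014-06-01'}}
    # where the analytic account and schedule date only appear when the line
    # has none of its own and the parent requisition provides them.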
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition.line', context=c),
}
class purchase_order(osv.osv):
_inherit = "purchase.order"
_columns = {
'requisition_id': fields.many2one('purchase.requisition', 'Call for Bids', copy=False),
}
def wkf_confirm_order(self, cr, uid, ids, context=None):
res = super(purchase_order, self).wkf_confirm_order(cr, uid, ids, context=context)
proc_obj = self.pool.get('procurement.order')
for po in self.browse(cr, uid, ids, context=context):
if po.requisition_id and (po.requisition_id.exclusive == 'exclusive'):
for order in po.requisition_id.purchase_ids:
if order.id != po.id:
proc_ids = proc_obj.search(cr, uid, [('purchase_id', '=', order.id)])
if proc_ids and po.state == 'confirmed':
proc_obj.write(cr, uid, proc_ids, {'purchase_id': po.id})
order.signal_workflow('purchase_cancel')
po.requisition_id.tender_done(context=context)
return res
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
stock_move_lines = super(purchase_order, self)._prepare_order_line_move(cr, uid, order, order_line, picking_id, group_id, context=context)
if order.requisition_id and order.requisition_id.procurement_id and order.requisition_id.procurement_id.move_dest_id:
for i in range(0, len(stock_move_lines)):
stock_move_lines[i]['move_dest_id'] = order.requisition_id.procurement_id.move_dest_id.id
return stock_move_lines
class purchase_order_line(osv.osv):
_inherit = 'purchase.order.line'
_columns = {
        'quantity_bid': fields.float('Quantity Bid', digits_compute=dp.get_precision('Product Unit of Measure'), help="Technical field for not losing the initial information about the quantity proposed in the bid"),
}
def action_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def action_confirm(self, cr, uid, ids, context=None):
super(purchase_order_line, self).action_confirm(cr, uid, ids, context=context)
for element in self.browse(cr, uid, ids, context=context):
if not element.quantity_bid:
self.write(cr, uid, ids, {'quantity_bid': element.product_qty}, context=context)
return True
def generate_po(self, cr, uid, tender_id, context=None):
#call generate_po from tender with active_id. Called from js widget
return self.pool.get('purchase.requisition').generate_po(cr, uid, [tender_id], context=context)
class product_template(osv.osv):
_inherit = 'product.template'
_columns = {
'purchase_requisition': fields.boolean('Call for Bids', help="Check this box to generate Call for Bids instead of generating requests for quotation from procurement.")
}
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'requisition_id': fields.many2one('purchase.requisition', 'Latest Requisition')
}
def _run(self, cr, uid, procurement, context=None):
requisition_obj = self.pool.get('purchase.requisition')
warehouse_obj = self.pool.get('stock.warehouse')
if procurement.rule_id and procurement.rule_id.action == 'buy' and procurement.product_id.purchase_requisition:
warehouse_id = warehouse_obj.search(cr, uid, [('company_id', '=', procurement.company_id.id)], context=context)
requisition_id = requisition_obj.create(cr, uid, {
'origin': procurement.origin,
'date_end': procurement.date_planned,
'warehouse_id': warehouse_id and warehouse_id[0] or False,
'company_id': procurement.company_id.id,
'procurement_id': procurement.id,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'line_ids': [(0, 0, {
'product_id': procurement.product_id.id,
'product_uom_id': procurement.product_uom.id,
'product_qty': procurement.product_qty
})],
})
self.message_post(cr, uid, [procurement.id], body=_("Purchase Requisition created"), context=context)
return self.write(cr, uid, [procurement.id], {'requisition_id': requisition_id}, context=context)
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
def _check(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'buy' and procurement.product_id.purchase_requisition:
if procurement.requisition_id.state == 'done':
if any([purchase.shipped for purchase in procurement.requisition_id.purchase_ids]):
return True
return False
return super(procurement_order, self)._check(cr, uid, procurement, context=context)
|
robotlinker/robotlinker_core
|
refs/heads/master
|
src/rosbridge_suite/rosbridge_server/src/tornado/locale.py
|
160
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Translation methods for generating localized strings.
To load a locale and generate a translated string::
user_locale = tornado.locale.get("es_LA")
    print(user_locale.translate("Sign out"))
`tornado.locale.get()` returns the closest matching locale, not necessarily the
specific locale you requested. You can support pluralization with
additional arguments to `~Locale.translate()`, e.g.::
people = [...]
message = user_locale.translate(
"%(list)s is online", "%(list)s are online", len(people))
    print(message % {"list": user_locale.list(people)})
The first string is chosen if ``len(people) == 1``, otherwise the second
string is chosen.
Applications should call one of `load_translations` (which uses a simple
CSV format) or `load_gettext_translations` (which uses the ``.mo`` format
supported by `gettext` and related tools). If neither method is called,
the `Locale.translate` method will simply return the original string.
"""
from __future__ import absolute_import, division, print_function, with_statement
import csv
import datetime
import numbers
import os
import re
from tornado import escape
from tornado.log import gen_log
from tornado.util import u
_default_locale = "en_US"
_translations = {}
_supported_locales = frozenset([_default_locale])
_use_gettext = False
def get(*locale_codes):
"""Returns the closest match for the given locale codes.
We iterate over all given locale codes in order. If we have a tight
or a loose match for the code (e.g., "en" for "en_US"), we return
the locale. Otherwise we move to the next code in the list.
By default we return ``en_US`` if no translations are found for any of
the specified locales. You can change the default locale with
`set_default_locale()`.
"""
return Locale.get_closest(*locale_codes)
def set_default_locale(code):
"""Sets the default locale.
The default locale is assumed to be the language used for all strings
in the system. The translations loaded from disk are mappings from
the default locale to the destination locale. Consequently, you don't
need to create a translation file for the default locale.
"""
global _default_locale
global _supported_locales
_default_locale = code
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
def load_translations(directory):
"""Loads translations from CSV files in a directory.
Translations are strings with optional Python-style named placeholders
(e.g., ``My name is %(name)s``) and their associated translations.
The directory should have translation files of the form ``LOCALE.csv``,
e.g. ``es_GT.csv``. The CSV files should have two or three columns: string,
translation, and an optional plural indicator. Plural indicators should
be one of "plural" or "singular". A given string can have both singular
and plural forms. For example ``%(name)s liked this`` may have a
different verb conjugation depending on whether %(name)s is one
name or a list of names. There should be two rows in the CSV file for
that string, one with plural indicator "singular", and one "plural".
For strings with no verbs that would change on translation, simply
use "unknown" or the empty string (or don't include the column at all).
The file is read using the `csv` module in the default "excel" dialect.
In this format there should not be spaces after the commas.
Example translation ``es_LA.csv``::
"I love you","Te amo"
"%(name)s liked this","A %(name)s les gustó esto","plural"
"%(name)s liked this","A %(name)s le gustó esto","singular"
"""
global _translations
global _supported_locales
_translations = {}
for path in os.listdir(directory):
if not path.endswith(".csv"):
continue
locale, extension = path.split(".")
if not re.match("[a-z]+(_[A-Z]+)?$", locale):
gen_log.error("Unrecognized locale %r (path: %s)", locale,
os.path.join(directory, path))
continue
full_path = os.path.join(directory, path)
try:
# python 3: csv.reader requires a file open in text mode.
# Force utf8 to avoid dependence on $LANG environment variable.
f = open(full_path, "r", encoding="utf-8")
except TypeError:
# python 2: files return byte strings, which are decoded below.
f = open(full_path, "r")
_translations[locale] = {}
for i, row in enumerate(csv.reader(f)):
if not row or len(row) < 2:
continue
row = [escape.to_unicode(c).strip() for c in row]
english, translation = row[:2]
if len(row) > 2:
plural = row[2] or "unknown"
else:
plural = "unknown"
if plural not in ("plural", "singular", "unknown"):
gen_log.error("Unrecognized plural indicator %r in %s line %d",
plural, path, i + 1)
continue
_translations[locale].setdefault(plural, {})[english] = translation
f.close()
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
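# Illustrative call (hypothetical path; not part of the original file): given a
# directory containing es_LA.csv and pt_BR.csv,
#
#     tornado.locale.load_translations("/srv/app/translations")
#
# makes get("es_LA") and get("pt_BR") return translating locales, while any
# other code falls back to the default locale's pass-through translate().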
def load_gettext_translations(directory, domain):
"""Loads translations from `gettext`'s locale tree
Locale tree is similar to system's ``/usr/share/locale``, like::
{directory}/{lang}/LC_MESSAGES/{domain}.mo
    Three steps are required to have your app translated:
1. Generate POT translation file::
xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc
2. Merge against existing POT file::
msgmerge old.po mydomain.po > new.po
3. Compile::
msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
"""
import gettext
global _translations
global _supported_locales
global _use_gettext
_translations = {}
for lang in os.listdir(directory):
if lang.startswith('.'):
continue # skip .svn, etc
if os.path.isfile(os.path.join(directory, lang)):
continue
try:
os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
_translations[lang] = gettext.translation(domain, directory,
languages=[lang])
except Exception as e:
gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
continue
_supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
_use_gettext = True
gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def get_supported_locales():
"""Returns a list of all the supported locale codes."""
return _supported_locales
class Locale(object):
"""Object representing a locale.
After calling one of `load_translations` or `load_gettext_translations`,
call `get` or `get_closest` to get a Locale object.
"""
@classmethod
def get_closest(cls, *locale_codes):
"""Returns the closest match for the given locale code."""
for code in locale_codes:
if not code:
continue
code = code.replace("-", "_")
parts = code.split("_")
if len(parts) > 2:
continue
elif len(parts) == 2:
code = parts[0].lower() + "_" + parts[1].upper()
if code in _supported_locales:
return cls.get(code)
if parts[0].lower() in _supported_locales:
return cls.get(parts[0].lower())
return cls.get(_default_locale)
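    # Illustrative matching sketch (hypothetical supported set
    # {"en_US", "es", "es_GT"}):
    #     Locale.get_closest("es-gt")  -> es_GT   (tight match; "-" normalized)
    #     Locale.get_closest("es_AR")  -> es      (loose match on language part)
    #     Locale.get_closest("fr_FR")  -> en_US   (no match; default locale)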
@classmethod
def get(cls, code):
"""Returns the Locale for the given locale code.
If it is not supported, we raise an exception.
"""
if not hasattr(cls, "_cache"):
cls._cache = {}
if code not in cls._cache:
assert code in _supported_locales
translations = _translations.get(code, None)
if translations is None:
locale = CSVLocale(code, {})
elif _use_gettext:
locale = GettextLocale(code, translations)
else:
locale = CSVLocale(code, translations)
cls._cache[code] = locale
return cls._cache[code]
def __init__(self, code, translations):
self.code = code
self.name = LOCALE_NAMES.get(code, {}).get("name", u("Unknown"))
self.rtl = False
for prefix in ["fa", "ar", "he"]:
if self.code.startswith(prefix):
self.rtl = True
break
self.translations = translations
# Initialize strings for date formatting
_ = self.translate
self._months = [
_("January"), _("February"), _("March"), _("April"),
_("May"), _("June"), _("July"), _("August"),
_("September"), _("October"), _("November"), _("December")]
self._weekdays = [
_("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
_("Friday"), _("Saturday"), _("Sunday")]
def translate(self, message, plural_message=None, count=None):
"""Returns the translation for the given message for this locale.
If ``plural_message`` is given, you must also provide
``count``. We return ``plural_message`` when ``count != 1``,
and we return the singular form for the given message when
``count == 1``.
"""
raise NotImplementedError()
def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
full_format=False):
"""Formats the given date (which should be GMT).
By default, we return a relative time (e.g., "2 minutes ago"). You
can return an absolute date string with ``relative=False``.
You can force a full format date ("July 10, 1980") with
``full_format=True``.
This method is primarily intended for dates in the past.
For dates in the future, we fall back to full format.
"""
if isinstance(date, numbers.Real):
date = datetime.datetime.utcfromtimestamp(date)
now = datetime.datetime.utcnow()
if date > now:
if relative and (date - now).seconds < 60:
                # Due to clock skew, some things are slightly
                # in the future. Round timestamps in the immediate
                # future down to now in relative mode.
date = now
else:
# Otherwise, future dates always use the full format.
full_format = True
local_date = date - datetime.timedelta(minutes=gmt_offset)
local_now = now - datetime.timedelta(minutes=gmt_offset)
local_yesterday = local_now - datetime.timedelta(hours=24)
difference = now - date
seconds = difference.seconds
days = difference.days
_ = self.translate
format = None
if not full_format:
if relative and days == 0:
if seconds < 50:
return _("1 second ago", "%(seconds)d seconds ago",
seconds) % {"seconds": seconds}
if seconds < 50 * 60:
minutes = round(seconds / 60.0)
return _("1 minute ago", "%(minutes)d minutes ago",
minutes) % {"minutes": minutes}
hours = round(seconds / (60.0 * 60))
return _("1 hour ago", "%(hours)d hours ago",
hours) % {"hours": hours}
if days == 0:
format = _("%(time)s")
elif days == 1 and local_date.day == local_yesterday.day and \
relative:
format = _("yesterday") if shorter else \
_("yesterday at %(time)s")
elif days < 5:
format = _("%(weekday)s") if shorter else \
_("%(weekday)s at %(time)s")
elif days < 334: # 11mo, since confusing for same month last year
format = _("%(month_name)s %(day)s") if shorter else \
_("%(month_name)s %(day)s at %(time)s")
if format is None:
format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
_("%(month_name)s %(day)s, %(year)s at %(time)s")
tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
if tfhour_clock:
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
elif self.code == "zh_CN":
str_time = "%s%d:%02d" % (
(u('\u4e0a\u5348'), u('\u4e0b\u5348'))[local_date.hour >= 12],
local_date.hour % 12 or 12, local_date.minute)
else:
str_time = "%d:%02d %s" % (
local_date.hour % 12 or 12, local_date.minute,
("am", "pm")[local_date.hour >= 12])
return format % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
"year": str(local_date.year),
"time": str_time
}
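    # Illustrative outputs for an en_US locale with relative=True (the
    # times are hypothetical): 35 seconds ago -> "35 seconds ago";
    # 20 minutes ago -> "20 minutes ago"; yesterday 14:05 local ->
    # "yesterday at 2:05 pm"; 8 months ago -> "March 3 at 2:05 pm"
    # (the year is added only past the ~11-month cutoff above).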
def format_day(self, date, gmt_offset=0, dow=True):
"""Formats the given date as a day of week.
Example: "Monday, January 22". You can remove the day of week with
``dow=False``.
"""
local_date = date - datetime.timedelta(minutes=gmt_offset)
_ = self.translate
if dow:
return _("%(weekday)s, %(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"weekday": self._weekdays[local_date.weekday()],
"day": str(local_date.day),
}
else:
return _("%(month_name)s %(day)s") % {
"month_name": self._months[local_date.month - 1],
"day": str(local_date.day),
}
def list(self, parts):
"""Returns a comma-separated list for the given list of parts.
The format is, e.g., "A, B and C", "A and B" or just "A" for lists
of size 1.
"""
_ = self.translate
if len(parts) == 0:
return ""
if len(parts) == 1:
return parts[0]
comma = u(' \u0648 ') if self.code.startswith("fa") else u(", ")
return _("%(commas)s and %(last)s") % {
"commas": comma.join(parts[:-1]),
"last": parts[len(parts) - 1],
}
def friendly_number(self, value):
"""Returns a comma-separated number for the given integer."""
if self.code not in ("en", "en_US"):
return str(value)
value = str(value)
parts = []
while value:
parts.append(value[-3:])
value = value[:-3]
return ",".join(reversed(parts))
class CSVLocale(Locale):
"""Locale implementation using tornado's CSV translation format."""
def translate(self, message, plural_message=None, count=None):
if plural_message is not None:
assert count is not None
if count != 1:
message = plural_message
message_dict = self.translations.get("plural", {})
else:
message_dict = self.translations.get("singular", {})
else:
message_dict = self.translations.get("unknown", {})
return message_dict.get(message, message)
class GettextLocale(Locale):
"""Locale implementation using the `gettext` module."""
def __init__(self, code, translations):
try:
# python 2
self.ngettext = translations.ungettext
self.gettext = translations.ugettext
except AttributeError:
# python 3
self.ngettext = translations.ngettext
self.gettext = translations.gettext
# self.gettext must exist before __init__ is called, since it
# calls into self.translate
super(GettextLocale, self).__init__(code, translations)
def translate(self, message, plural_message=None, count=None):
if plural_message is not None:
assert count is not None
return self.ngettext(message, plural_message, count)
else:
return self.gettext(message)
LOCALE_NAMES = {
"af_ZA": {"name_en": u("Afrikaans"), "name": u("Afrikaans")},
"am_ET": {"name_en": u("Amharic"), "name": u('\u12a0\u121b\u122d\u129b')},
"ar_AR": {"name_en": u("Arabic"), "name": u("\u0627\u0644\u0639\u0631\u0628\u064a\u0629")},
"bg_BG": {"name_en": u("Bulgarian"), "name": u("\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438")},
"bn_IN": {"name_en": u("Bengali"), "name": u("\u09ac\u09be\u0982\u09b2\u09be")},
"bs_BA": {"name_en": u("Bosnian"), "name": u("Bosanski")},
"ca_ES": {"name_en": u("Catalan"), "name": u("Catal\xe0")},
"cs_CZ": {"name_en": u("Czech"), "name": u("\u010ce\u0161tina")},
"cy_GB": {"name_en": u("Welsh"), "name": u("Cymraeg")},
"da_DK": {"name_en": u("Danish"), "name": u("Dansk")},
"de_DE": {"name_en": u("German"), "name": u("Deutsch")},
"el_GR": {"name_en": u("Greek"), "name": u("\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac")},
"en_GB": {"name_en": u("English (UK)"), "name": u("English (UK)")},
"en_US": {"name_en": u("English (US)"), "name": u("English (US)")},
"es_ES": {"name_en": u("Spanish (Spain)"), "name": u("Espa\xf1ol (Espa\xf1a)")},
"es_LA": {"name_en": u("Spanish"), "name": u("Espa\xf1ol")},
"et_EE": {"name_en": u("Estonian"), "name": u("Eesti")},
"eu_ES": {"name_en": u("Basque"), "name": u("Euskara")},
"fa_IR": {"name_en": u("Persian"), "name": u("\u0641\u0627\u0631\u0633\u06cc")},
"fi_FI": {"name_en": u("Finnish"), "name": u("Suomi")},
"fr_CA": {"name_en": u("French (Canada)"), "name": u("Fran\xe7ais (Canada)")},
"fr_FR": {"name_en": u("French"), "name": u("Fran\xe7ais")},
"ga_IE": {"name_en": u("Irish"), "name": u("Gaeilge")},
"gl_ES": {"name_en": u("Galician"), "name": u("Galego")},
"he_IL": {"name_en": u("Hebrew"), "name": u("\u05e2\u05d1\u05e8\u05d9\u05ea")},
"hi_IN": {"name_en": u("Hindi"), "name": u("\u0939\u093f\u0928\u094d\u0926\u0940")},
"hr_HR": {"name_en": u("Croatian"), "name": u("Hrvatski")},
"hu_HU": {"name_en": u("Hungarian"), "name": u("Magyar")},
"id_ID": {"name_en": u("Indonesian"), "name": u("Bahasa Indonesia")},
"is_IS": {"name_en": u("Icelandic"), "name": u("\xcdslenska")},
"it_IT": {"name_en": u("Italian"), "name": u("Italiano")},
"ja_JP": {"name_en": u("Japanese"), "name": u("\u65e5\u672c\u8a9e")},
"ko_KR": {"name_en": u("Korean"), "name": u("\ud55c\uad6d\uc5b4")},
"lt_LT": {"name_en": u("Lithuanian"), "name": u("Lietuvi\u0173")},
"lv_LV": {"name_en": u("Latvian"), "name": u("Latvie\u0161u")},
"mk_MK": {"name_en": u("Macedonian"), "name": u("\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438")},
"ml_IN": {"name_en": u("Malayalam"), "name": u("\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02")},
"ms_MY": {"name_en": u("Malay"), "name": u("Bahasa Melayu")},
"nb_NO": {"name_en": u("Norwegian (bokmal)"), "name": u("Norsk (bokm\xe5l)")},
"nl_NL": {"name_en": u("Dutch"), "name": u("Nederlands")},
"nn_NO": {"name_en": u("Norwegian (nynorsk)"), "name": u("Norsk (nynorsk)")},
"pa_IN": {"name_en": u("Punjabi"), "name": u("\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40")},
"pl_PL": {"name_en": u("Polish"), "name": u("Polski")},
"pt_BR": {"name_en": u("Portuguese (Brazil)"), "name": u("Portugu\xeas (Brasil)")},
"pt_PT": {"name_en": u("Portuguese (Portugal)"), "name": u("Portugu\xeas (Portugal)")},
"ro_RO": {"name_en": u("Romanian"), "name": u("Rom\xe2n\u0103")},
"ru_RU": {"name_en": u("Russian"), "name": u("\u0420\u0443\u0441\u0441\u043a\u0438\u0439")},
"sk_SK": {"name_en": u("Slovak"), "name": u("Sloven\u010dina")},
"sl_SI": {"name_en": u("Slovenian"), "name": u("Sloven\u0161\u010dina")},
"sq_AL": {"name_en": u("Albanian"), "name": u("Shqip")},
"sr_RS": {"name_en": u("Serbian"), "name": u("\u0421\u0440\u043f\u0441\u043a\u0438")},
"sv_SE": {"name_en": u("Swedish"), "name": u("Svenska")},
"sw_KE": {"name_en": u("Swahili"), "name": u("Kiswahili")},
"ta_IN": {"name_en": u("Tamil"), "name": u("\u0ba4\u0bae\u0bbf\u0bb4\u0bcd")},
"te_IN": {"name_en": u("Telugu"), "name": u("\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41")},
"th_TH": {"name_en": u("Thai"), "name": u("\u0e20\u0e32\u0e29\u0e32\u0e44\u0e17\u0e22")},
"tl_PH": {"name_en": u("Filipino"), "name": u("Filipino")},
"tr_TR": {"name_en": u("Turkish"), "name": u("T\xfcrk\xe7e")},
"uk_UA": {"name_en": u("Ukraini "), "name": u("\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430")},
"vi_VN": {"name_en": u("Vietnamese"), "name": u("Ti\u1ebfng Vi\u1ec7t")},
"zh_CN": {"name_en": u("Chinese (Simplified)"), "name": u("\u4e2d\u6587(\u7b80\u4f53)")},
"zh_TW": {"name_en": u("Chinese (Traditional)"), "name": u("\u4e2d\u6587(\u7e41\u9ad4)")},
}
|
terabit-software/dynamic-stream-server
|
refs/heads/master
|
dss/tools/ffmpeg.py
|
2
|
from __future__ import absolute_import
import shlex
from ..config import config
bin_default = config.get('ffmpeg', 'bin')
probe = config.get('ffmpeg', 'probe')
def _input_cmd(cmd_input, input, add_probe=True, bin=None, add_bin=True):
""" Base of FFmpeg command with a single input.
"""
if add_bin:
args = [bin_default if bin is None else bin]
else:
args = []
if cmd_input is None:
raise ValueError('Passing `None` on `cmd_input` will cause '
                         'shlex.split to hang instead of raising an error.')
args += shlex.split(cmd_input)
if add_probe:
args += ['-probesize', probe]
args += ['-i', input]
return args
def cmd(cmd_input, input, cmd_output, output, add_probe=True, bin=None):
""" Build FFmpeg command for a single input and single output.
"""
args = _input_cmd(cmd_input, input, add_probe, bin)
args += shlex.split(cmd_output)
args.append(output)
return args
def cmd_inputs(cmd_input, inputs, cmd_output, output, add_probe=True, bin=None):
""" Build FFmpeg command for multiple input files and a single output.
    If an item in the `inputs` list is a 2-item tuple, it will be unpacked
    into the input command for that input and the input itself.
E.g.: ['audio_file.mp4', ('-f mpegts', 'video_stream')]
"""
args = []
cmd_input_ = cmd_input
for ix, inp in enumerate(inputs):
if cmd_input is None:
cmd_input_, inp = inp
if isinstance(inp, tuple):
cmd_input_ += ' ' + inp[0]
inp = inp[1]
args += _input_cmd(cmd_input_, inp, add_probe, bin, add_bin=not ix)
cmd_input_ = cmd_input
args += shlex.split(cmd_output)
args.append(output)
return args
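# Illustrative expansion (file and stream names are hypothetical; `bin`
# and `probe` come from the config):
# cmd_inputs('', ['audio_file.mp4', ('-f mpegts', 'video_stream')],
#            '-c copy', 'out.mp4')
# -> [bin, '-probesize', probe, '-i', 'audio_file.mp4',
#     '-f', 'mpegts', '-probesize', probe, '-i', 'video_stream',
#     '-c', 'copy', 'out.mp4']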
def cmd_outputs(cmd_input, input, base_cmd_output, cmd_output_specific, outputs, add_probe=True, bin=None):
""" Build FFmpeg command for multiple outputs but single input.
"""
args = _input_cmd(cmd_input, input, add_probe, bin)
base_cmd_output = shlex.split(base_cmd_output)
for out_cmd, out in zip(cmd_output_specific, outputs):
args += base_cmd_output
args += shlex.split(out_cmd)
args.append(out)
return args
def cmd_inputs_outputs(cmd_input, inputs, base_cmd_output, cmd_output_specific, outputs, add_probe=True, bin=None):
""" Build FFmpeg command for multiple input files and a multiple outputs.
If an item on the `input` list is a 2-item tuple, it will be unpacked into
input command for this input and the input.
E.g.: ['audio_file.mp4', ('-f mpegts', 'video_stream')]
"""
args = []
cmd_input_ = cmd_input
for ix, inp in enumerate(inputs):
if cmd_input is None:
cmd_input_, inp = inp
if isinstance(inp, tuple):
cmd_input_ += ' ' + inp[0]
inp = inp[1]
args += _input_cmd(cmd_input_, inp, add_probe, bin, add_bin=not ix)
cmd_input_ = cmd_input
base_cmd_output = shlex.split(base_cmd_output)
for out_cmd, out in zip(cmd_output_specific, outputs):
args += base_cmd_output
args += shlex.split(out_cmd)
args.append(out)
return args
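# Illustrative multi-output expansion (names are hypothetical): the shared
# output flags are repeated before each per-output command, so
# cmd_outputs('', 'in.ts', '-c copy', ['-f flv', '-f mp4'],
#             ['out.flv', 'out.mp4'])
# -> [bin, '-probesize', probe, '-i', 'in.ts',
#     '-c', 'copy', '-f', 'flv', 'out.flv',
#     '-c', 'copy', '-f', 'mp4', 'out.mp4']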
|
GSDan/CrumbsOfWisdom
|
refs/heads/master
|
biscuitBox/main.py
|
1
|
import os
import random
import errno
import requests
import picamera
import sys
import threading
import subprocess
from PIL import Image, ImageEnhance
import RPi.GPIO as GPIO
from time import sleep, time
# GPIO pins
gLight = 16 # Light sensor
gCamBut = 15 # Take photo button
gSkipBut = 22 # Skip question button
gLED = 7 # LED
thisDir = os.path.dirname(os.path.abspath(__file__))
downloadsFolder = thisDir + "/QuestionFiles"
serverAddress = "http://138.68.133.209:1337/"
pollIntervalMinutes = 1
currentLightLevel = 0
closedLightLevel = 100
lastQuestionId = ""
lastPlayed = ""
canTakePhoto = False
hasTakenPhoto = False
tempImageFile = thisDir + "/image.jpg"
lock = threading.Lock()
camera = picamera.PiCamera()
camera.sharpness = 30
camera.contrast = 100
camera.saturation = -100
camera.rotation = 90
def TakeAndCropPhoto():
global camera
camera.capture(tempImageFile)
subprocess.call(['mplayer', thisDir + "/camera-snap.wav"])
    # Crop image & boost the contrast
original = Image.open(tempImageFile)
contrast = ImageEnhance.Contrast(original)
original = contrast.enhance(3)
width, height = original.size
left = int(width/3.6)
top = int(height/12)
right = int(width - width/7)
bottom = int(height - height/12)
original.crop((left, top, right, bottom)).save(tempImageFile)
def Error(e):
print e
subprocess.call(['mplayer', thisDir + "/error.mp3"])
#GPIO.cleanup()
#os._exit()
def UploadImage():
print "Uploading " + tempImageFile
try:
with open(tempImageFile, 'rb') as payload:
files = {"image" : payload}
data = {"questionId" : lastQuestionId }
res = requests.post(serverAddress + "advice/upload", files = files, data = data)
if res.status_code == 200:
print "UPLOAD RETURN: " + str(res.json())
subprocess.call(['mplayer', thisDir + "/success.mp3"])
return True
else:
print "Upload failed!!"
return False
    except requests.exceptions.RequestException as e:
        Error(e)
return False
# Checks for new questions on the server
# If a question doesn't exist locally, download it
# RUN ON A SEPARATE THREAD
def RefreshQuestions():
while True:
print "POLLING SERVER"
try:
res = requests.get(serverAddress + "question/getnew")
RemoveUnwantedFiles(res.json())
for question in res.json():
localPath = os.path.join(downloadsFolder, os.path.basename(str(question["id"]) + ".mp3"))
# Download the file if it doesn't exist locally
if not os.path.isfile(localPath):
params = {"fd" : question["filename"] }
r = requests.get(serverAddress + "file/download", params=params, stream=True)
print "DOWNLOADING:", question["filename"]
with open(localPath, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
print "FINISHED:", localPath
else:
print "ALREADY CACHED:", localPath
        except requests.exceptions.RequestException as e:
print e
sleep(60 * pollIntervalMinutes)
def RemoveUnwantedFiles(questions):
for fn in os.listdir(downloadsFolder):
found = False
thisPath = os.path.join(downloadsFolder, fn)
print "Checking downloaded file", thisPath
for question in questions:
localPath = os.path.join(downloadsFolder, os.path.basename(str(question["id"]) + ".mp3"))
if localPath == thisPath:
found = True
break
if not found:
print "Deleting", thisPath
try:
                os.remove(thisPath)
except OSError:
pass
# Checks the current light level and reports the average
# readings over a second long window.
# RUN ON A SEPARATE THREAD
def CheckLightLevels():
global currentLightLevel
try:
window = [0,0,0,0,0] # 1 second window
lastReading = 0
GPIO.setup(gLED, GPIO.OUT)
while True:
# Ground the pin to empty the capacitor
GPIO.setup(gLight, GPIO.OUT)
GPIO.output(gLight, GPIO.LOW)
sleep(0.2)
# Set as an input
GPIO.setup(gLight, GPIO.IN)
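            # Time how long the capacitor takes to charge back past the
            # input pin's logic-high threshold; the charge time (reported
            # below in microseconds) varies with the light-dependent
            # resistor, so it tracks the ambient light inside the box.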
startTime = time()
while GPIO.input(gLight) == GPIO.LOW: pass
elapsed = int((time() - startTime) * 1000000)
# Add data to rolling window
window.append(elapsed)
del window[0]
with lock:
# Rest of the program is given the window's average
currentLightLevel = (sum(window) / float(len(window)))
# Update the LED - light if there are files available
            GPIO.output(gLED, len(os.listdir(downloadsFolder)))
    except Exception:
        Error("Light level err")
# Checks whether or not the buttons have been pressed
# If gCamBut has been and if permitted, take a photo and upload it
# If gSkipBut has been pressed, skip the last played question and play the next if able
def CheckButtonStatus(pressedPin):
global canTakePhoto
global hasTakenPhoto
global lastQuestionId
    try:
if pressedPin == gCamBut:
if canTakePhoto and not hasTakenPhoto:
print "Taking photo"
TakeAndCropPhoto()
hasTakenPhoto = True
success = UploadImage()
if success:
print "Deleting file:", lastPlayed
try:
os.remove(lastPlayed)
except OSError:
pass
lastQuestionId = ""
else:
print "Can't take a photo right now"
        elif pressedPin == gSkipBut:
print "Skip button!"
if lastQuestionId != "":
data = {"questionId" : lastQuestionId }
res = requests.post(serverAddress + "question/dismiss", data = data)
os.remove(lastPlayed)
if os.listdir(downloadsFolder):
PlayQuestion()
else:
print "No more messages"
except Exception as e:
print str(e)
# Play a random downloaded question audio file
def PlayQuestion():
global canTakePhoto
global hasTakenPhoto
global lastPlayed
global lastQuestionId
canTakePhoto = False
hasTakenPhoto = False
thisQ = random.choice(os.listdir(downloadsFolder))
lastPlayed = os.path.join(downloadsFolder, thisQ)
lastQuestionId = os.path.splitext(thisQ)[0]
subprocess.call(['mplayer', lastPlayed])
subprocess.call(['mplayer', thisDir + "/takePhoto.mp3"])
try:
GPIO.setmode(GPIO.BOARD)
try:
# Make the downloads folder if it doesn't exist
os.makedirs(downloadsFolder)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
print "Setting volume to 90%"
subprocess.call(["amixer", "cset", "numid=1", "--", "90%"])
# CREATE ALL THE THREADS!!
# Check the server on a separate thread
serverPollThread = threading.Thread(name="biscuitServer", target=RefreshQuestions)
serverPollThread.setDaemon(True)
serverPollThread.start()
# Measure the light level on a separate thread
lightLevelThread = threading.Thread(name="biscuitLight", target=CheckLightLevels)
lightLevelThread.setDaemon(True)
lightLevelThread.start()
GPIO.setup (gCamBut, GPIO.IN, GPIO.PUD_UP)
GPIO.add_event_detect(gCamBut, GPIO.RISING)
GPIO.add_event_callback(gCamBut, CheckButtonStatus)
GPIO.setup (gSkipBut, GPIO.IN, GPIO.PUD_UP)
GPIO.add_event_detect(gSkipBut, GPIO.RISING)
GPIO.add_event_callback(gSkipBut, CheckButtonStatus)
# If the light level shows the box is open, play an audio message
# Only play the message again after the box has been closed
hasPlayed = False
subprocess.call(['mplayer', thisDir + "/start.mp3"])
while True:
sleep(1.5)
with lock:
tinOpen = (currentLightLevel > closedLightLevel)
if not tinOpen:
if hasPlayed:
canTakePhoto = True
hasPlayed = False
if tinOpen and not hasPlayed and os.listdir(downloadsFolder):
hasPlayed = True
PlayQuestion()
finally:
print "Finish"
GPIO.cleanup()
|
jbest/brisc
|
refs/heads/master
|
manage.py
|
1
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rapid_web.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
UOMx/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/tests/test_password_history.py
|
128
|
"""
This file tests some of the PasswordHistory features through the LMS
"""
import json
from mock import patch
from uuid import uuid4
from nose.plugins.attrib import attr
from django.contrib.auth.models import User
from django.utils import timezone
from datetime import timedelta
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.contrib.auth.tokens import default_token_generator
from django.utils.http import int_to_base36
from freezegun import freeze_time
from student.models import PasswordHistory
from courseware.tests.helpers import LoginEnrollmentTestCase
@attr('shard_1')
@patch.dict("django.conf.settings.FEATURES", {'ADVANCED_SECURITY': True})
class TestPasswordHistory(LoginEnrollmentTestCase):
"""
Go through some of the PasswordHistory use cases
"""
def _login(self, email, password, should_succeed=True, err_msg_check=None):
"""
Override the base implementation so we can do appropriate asserts
"""
resp = self.client.post(reverse('login'), {'email': email, 'password': password})
data = json.loads(resp.content)
self.assertEqual(resp.status_code, 200)
if should_succeed:
self.assertTrue(data['success'])
else:
self.assertFalse(data['success'])
if err_msg_check:
self.assertIn(err_msg_check, data['value'])
def _setup_user(self, is_staff=False, password=None):
"""
Override the base implementation to randomize the email
"""
email = 'foo_{0}@test.com'.format(uuid4().hex[:8])
password = password if password else 'foo'
username = 'test_{0}'.format(uuid4().hex[:8])
self.create_account(username, email, password)
self.activate_user(email)
# manually twiddle the is_staff bit, if needed
if is_staff:
user = User.objects.get(email=email)
user.is_staff = True
user.save()
return email, password
def _update_password(self, email, new_password):
"""
Helper method to reset a password
"""
user = User.objects.get(email=email)
user.set_password(new_password)
user.save()
history = PasswordHistory()
history.create(user)
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS': None})
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS': None})
def test_no_forced_password_change(self):
"""
Makes sure default behavior is correct when we don't have this turned on
"""
email, password = self._setup_user()
self._login(email, password)
email, password = self._setup_user(is_staff=True)
self._login(email, password)
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS': 1})
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS': 5})
def test_forced_password_change(self):
"""
        Make sure passwords are viewed as expired in the LMS after the policy time has elapsed
"""
student_email, student_password = self._setup_user()
staff_email, staff_password = self._setup_user(is_staff=True)
self._login(student_email, student_password)
self._login(staff_email, staff_password)
staff_reset_time = timezone.now() + timedelta(days=1)
with freeze_time(staff_reset_time):
self._login(student_email, student_password)
# staff should fail because password expired
self._login(staff_email, staff_password, should_succeed=False,
err_msg_check="Your password has expired due to password policy on this account")
# if we reset the password, we should be able to log in
self._update_password(staff_email, "updated")
self._login(staff_email, "updated")
student_reset_time = timezone.now() + timedelta(days=5)
with freeze_time(student_reset_time):
# Both staff and student logins should fail because user must
# reset the password
self._login(student_email, student_password, should_succeed=False,
err_msg_check="Your password has expired due to password policy on this account")
self._update_password(student_email, "updated")
self._login(student_email, "updated")
self._login(staff_email, staff_password, should_succeed=False,
err_msg_check="Your password has expired due to password policy on this account")
self._update_password(staff_email, "updated2")
self._login(staff_email, "updated2")
def test_allow_all_password_reuse(self):
"""
Tests that password_reset flows work as expected if reuse config is missing, meaning
passwords can always be reused
"""
student_email, _ = self._setup_user()
user = User.objects.get(email=student_email)
err_msg = 'You are re-using a password that you have used recently.'
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
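        # Django's password_reset_confirm URL embeds the base36-encoded
        # user id together with the one-time token generated above.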
# try to do a password reset with the same password as before
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo'
}, follow=True)
self.assertNotIn(
err_msg,
resp.content
)
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE': 1})
def test_student_password_reset_reuse(self):
"""
Goes through the password reset flows to make sure the various password reuse policies are enforced
"""
student_email, _ = self._setup_user()
user = User.objects.get(email=student_email)
err_msg = 'You are re-using a password that you have used recently. You must have 1 distinct password'
success_msg = 'Your Password Reset is Complete'
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
# try to do a password reset with the same password as before
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo'
}, follow=True)
self.assertIn(
err_msg,
resp.content
)
# now retry with a different password
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'bar',
'new_password2': 'bar'
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE': 2})
def test_staff_password_reset_reuse(self):
"""
Goes through the password reset flows to make sure the various password reuse policies are enforced
"""
staff_email, _ = self._setup_user(is_staff=True)
user = User.objects.get(email=staff_email)
err_msg = 'You are re-using a password that you have used recently. You must have 2 distinct passwords'
success_msg = 'Your Password Reset is Complete'
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
# try to do a password reset with the same password as before
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
self.assertIn(
err_msg,
resp.content
)
# now use different one
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'bar',
'new_password2': 'bar',
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
# now try again with the first one
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
# should be rejected
self.assertIn(
err_msg,
resp.content
)
# now use different one
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'baz',
'new_password2': 'baz',
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
# now we should be able to reuse the first one
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS': 1})
def test_password_reset_frequency_limit(self):
"""
Asserts the frequency limit on how often we can change passwords
"""
staff_email, _ = self._setup_user(is_staff=True)
success_msg = 'Your Password Reset is Complete'
# try to reset password, it should fail
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
# try to do a password reset with the same password as before
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
self.assertNotIn(
success_msg,
resp.content
)
# pretend we're in the future
staff_reset_time = timezone.now() + timedelta(days=1)
with freeze_time(staff_reset_time):
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
# try to do a password reset with the same password as before
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
@patch.dict("django.conf.settings.FEATURES", {'ENFORCE_PASSWORD_POLICY': True})
@override_settings(PASSWORD_MIN_LENGTH=6)
def test_password_policy_on_password_reset(self):
"""
        This makes sure the proper asserts on password policy also work on password reset
"""
staff_email, _ = self._setup_user(is_staff=True, password='foofoo')
success_msg = 'Your Password Reset is Complete'
# try to reset password, it should fail
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
# try to do a password reset with the same password as before
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
self.assertNotIn(
success_msg,
resp.content
)
# try to reset password with a long enough password
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
# try to do a password reset with the same password as before
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foofoo',
'new_password2': 'foofoo',
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
|
guijomatos/SickRage
|
refs/heads/master
|
lib/hachoir_metadata/timezone.py
|
185
|
from datetime import tzinfo, timedelta
class TimezoneUTC(tzinfo):
"""UTC timezone"""
ZERO = timedelta(0)
def utcoffset(self, dt):
return TimezoneUTC.ZERO
def tzname(self, dt):
return u"UTC"
def dst(self, dt):
return TimezoneUTC.ZERO
def __repr__(self):
return "<TimezoneUTC delta=0, name=u'UTC'>"
class Timezone(TimezoneUTC):
"""Fixed offset in hour from UTC."""
def __init__(self, offset):
self._offset = timedelta(minutes=offset*60)
self._name = u"%+03u00" % offset
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
return self._name
def __repr__(self):
return "<Timezone delta=%s, name='%s'>" % (
self._offset, self._name)
UTC = TimezoneUTC()
def createTimezone(offset):
if offset:
return Timezone(offset)
else:
return UTC
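# Illustrative (offset is a whole number of hours): createTimezone(0)
# returns the shared UTC instance, while createTimezone(2) yields a
# fixed-offset zone whose tzname() is u"+0200".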
|
V11/volcano
|
refs/heads/master
|
server/sqlmap/tamper/nonrecursivereplacement.py
|
10
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import random
import re
from lib.core.common import singleTimeWarnMessage
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.NORMAL
def tamper(payload, **kwargs):
"""
    Replaces predefined SQL keywords with representations
    suitable for bypassing replacement filters (e.g. .replace("SELECT", ""))
Notes:
* Useful to bypass very weak custom filters
>>> random.seed(0)
>>> tamper('1 UNION SELECT 2--')
'1 UNIOUNIONN SELESELECTCT 2--'
"""
keywords = ("UNION", "SELECT", "INSERT", "UPDATE", "FROM", "WHERE")
retVal = payload
warnMsg = "currently only couple of keywords are being processed %s. " % str(keywords)
warnMsg += "You can set it manually according to your needs"
singleTimeWarnMessage(warnMsg)
if payload:
for keyword in keywords:
_ = random.randint(1, len(keyword) - 1)
retVal = re.sub(r"(?i)\b%s\b" % keyword, "%s%s%s" % (keyword[:_], keyword, keyword[_:]), retVal)
return retVal
|
jeremiahyan/odoo
|
refs/heads/master
|
addons/account/models/account_journal_dashboard.py
|
2
|
import json
from datetime import datetime, timedelta
from babel.dates import format_datetime, format_date
from odoo import models, api, _, fields
from odoo.osv import expression
from odoo.release import version
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from odoo.tools.misc import formatLang, format_date as odoo_format_date, get_lang
import random
import ast
class account_journal(models.Model):
_inherit = "account.journal"
def _kanban_dashboard(self):
for journal in self:
journal.kanban_dashboard = json.dumps(journal.get_journal_dashboard_datas())
def _kanban_dashboard_graph(self):
for journal in self:
if (journal.type in ['sale', 'purchase']):
journal.kanban_dashboard_graph = json.dumps(journal.get_bar_graph_datas())
elif (journal.type in ['cash', 'bank']):
journal.kanban_dashboard_graph = json.dumps(journal.get_line_graph_datas())
else:
journal.kanban_dashboard_graph = False
def _get_json_activity_data(self):
for journal in self:
activities = []
            # search for activities on moves in this journal
sql_query = '''
SELECT act.id,
act.res_id,
act.res_model,
act.summary,
act_type.name as act_type_name,
act_type.category as activity_category,
act.date_deadline,
m.date,
m.ref,
CASE WHEN act.date_deadline < CURRENT_DATE THEN 'late' ELSE 'future' END as status
FROM account_move m
LEFT JOIN mail_activity act ON act.res_id = m.id
LEFT JOIN mail_activity_type act_type ON act.activity_type_id = act_type.id
WHERE act.res_model = 'account.move'
AND m.journal_id = %s
'''
self.env.cr.execute(sql_query, (journal.id,))
for activity in self.env.cr.dictfetchall():
act = {
'id': activity.get('id'),
'res_id': activity.get('res_id'),
'res_model': activity.get('res_model'),
'status': activity.get('status'),
'name': (activity.get('summary') or activity.get('act_type_name')),
'activity_category': activity.get('activity_category'),
'date': odoo_format_date(self.env, activity.get('date_deadline'))
}
if activity.get('activity_category') == 'tax_report' and activity.get('res_model') == 'account.move':
act['name'] = activity.get('ref')
activities.append(act)
journal.json_activity_data = json.dumps({'activities': activities})
kanban_dashboard = fields.Text(compute='_kanban_dashboard')
kanban_dashboard_graph = fields.Text(compute='_kanban_dashboard_graph')
json_activity_data = fields.Text(compute='_get_json_activity_data')
show_on_dashboard = fields.Boolean(string='Show journal on dashboard', help="Whether this journal should be displayed on the dashboard or not", default=True)
color = fields.Integer("Color Index", default=0)
def _graph_title_and_key(self):
if self.type in ['sale', 'purchase']:
return ['', _('Residual amount')]
elif self.type == 'cash':
return ['', _('Cash: Balance')]
elif self.type == 'bank':
return ['', _('Bank: Balance')]
    # The method below gathers the data for bank and cash statements
def get_line_graph_datas(self):
"""Computes the data used to display the graph for bank and cash journals in the accounting dashboard"""
currency = self.currency_id or self.company_id.currency_id
def build_graph_data(date, amount):
#display date in locale format
name = format_date(date, 'd LLLL Y', locale=locale)
short_name = format_date(date, 'd MMM', locale=locale)
return {'x':short_name,'y': amount, 'name':name}
self.ensure_one()
BankStatement = self.env['account.bank.statement']
data = []
today = datetime.today()
last_month = today + timedelta(days=-30)
locale = get_lang(self.env).code
#starting point of the graph is the last statement
last_stmt = self._get_last_bank_statement(domain=[('move_id.state', '=', 'posted')])
last_balance = last_stmt and last_stmt.balance_end_real or 0
data.append(build_graph_data(today, last_balance))
#then we subtract the total amount of bank statement lines per day to get the previous points
#(graph is drawn backward)
date = today
amount = last_balance
query = '''
SELECT move.date, sum(st_line.amount) as amount
FROM account_bank_statement_line st_line
JOIN account_move move ON move.id = st_line.move_id
WHERE move.journal_id = %s
AND move.date > %s
AND move.date <= %s
GROUP BY move.date
ORDER BY move.date desc
'''
self.env.cr.execute(query, (self.id, last_month, today))
query_result = self.env.cr.dictfetchall()
for val in query_result:
date = val['date']
if date != today.strftime(DF): # make sure the last point in the graph is today
data[:0] = [build_graph_data(date, amount)]
amount = currency.round(amount - val['amount'])
# make sure the graph starts 1 month ago
if date.strftime(DF) != last_month.strftime(DF):
data[:0] = [build_graph_data(last_month, amount)]
[graph_title, graph_key] = self._graph_title_and_key()
color = '#875A7B' if 'e' in version else '#7c7bad'
is_sample_data = not last_stmt and len(query_result) == 0
if is_sample_data:
data = []
for i in range(30, 0, -5):
current_date = today + timedelta(days=-i)
data.append(build_graph_data(current_date, random.randint(-5, 15)))
return [{'values': data, 'title': graph_title, 'key': graph_key, 'area': True, 'color': color, 'is_sample_data': is_sample_data}]
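    # Worked example of the backward walk above (figures are illustrative):
    # with today's balance at 100.0 and daily statement-line sums of +20.0
    # (yesterday) and -5.0 (three days ago), the loop plots 100.0 for today
    # and for yesterday (no lines dated today), then 80.0 three days ago
    # (100.0 - 20.0), and pads the 1-month boundary with 85.0 (80.0 + 5.0).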
def get_bar_graph_datas(self):
data = []
today = fields.Datetime.now(self)
data.append({'label': _('Due'), 'value':0.0, 'type': 'past'})
day_of_week = int(format_datetime(today, 'e', locale=get_lang(self.env).code))
first_day_of_week = today + timedelta(days=-day_of_week+1)
for i in range(-1,4):
if i==0:
label = _('This Week')
elif i==3:
label = _('Not Due')
else:
start_week = first_day_of_week + timedelta(days=i*7)
end_week = start_week + timedelta(days=6)
if start_week.month == end_week.month:
label = str(start_week.day) + '-' + str(end_week.day) + ' ' + format_date(end_week, 'MMM', locale=get_lang(self.env).code)
else:
label = format_date(start_week, 'd MMM', locale=get_lang(self.env).code) + '-' + format_date(end_week, 'd MMM', locale=get_lang(self.env).code)
data.append({'label':label,'value':0.0, 'type': 'past' if i<0 else 'future'})
# Build SQL query to find amount aggregated by week
(select_sql_clause, query_args) = self._get_bar_graph_select_query()
query = ''
start_date = (first_day_of_week + timedelta(days=-7))
for i in range(0,6):
if i == 0:
query += "("+select_sql_clause+" and invoice_date_due < '"+start_date.strftime(DF)+"')"
elif i == 5:
query += " UNION ALL ("+select_sql_clause+" and invoice_date_due >= '"+start_date.strftime(DF)+"')"
else:
next_date = start_date + timedelta(days=7)
query += " UNION ALL ("+select_sql_clause+" and invoice_date_due >= '"+start_date.strftime(DF)+"' and invoice_date_due < '"+next_date.strftime(DF)+"')"
start_date = next_date
self.env.cr.execute(query, query_args)
query_results = self.env.cr.dictfetchall()
is_sample_data = True
for index in range(0, len(query_results)):
if query_results[index].get('aggr_date') != None:
is_sample_data = False
data[index]['value'] = query_results[index].get('total')
[graph_title, graph_key] = self._graph_title_and_key()
if is_sample_data:
for index in range(0, len(query_results)):
data[index]['type'] = 'o_sample_data'
# we use unrealistic values for the sample data
data[index]['value'] = random.randint(0, 20)
graph_key = _('Sample data')
return [{'values': data, 'title': graph_title, 'key': graph_key, 'is_sample_data': is_sample_data}]
def _get_bar_graph_select_query(self):
"""
Returns a tuple containing the base SELECT SQL query used to gather
the bar graph's data as its first element, and the arguments dictionary
for it as its second.
"""
sign = '' if self.type == 'sale' else '-'
return ('''
SELECT
''' + sign + ''' + SUM(move.amount_residual_signed) AS total,
MIN(invoice_date_due) AS aggr_date
FROM account_move move
WHERE move.journal_id = %(journal_id)s
AND move.state = 'posted'
AND move.payment_state in ('not_paid', 'partial')
AND move.move_type IN %(invoice_types)s
''', {
'invoice_types': tuple(self.env['account.move'].get_invoice_types(True)),
'journal_id': self.id
})
def get_journal_dashboard_datas(self):
currency = self.currency_id or self.company_id.currency_id
number_to_reconcile = number_to_check = last_balance = 0
has_at_least_one_statement = False
bank_account_balance = nb_lines_bank_account_balance = 0
outstanding_pay_account_balance = nb_lines_outstanding_pay_account_balance = 0
title = ''
number_draft = number_waiting = number_late = to_check_balance = 0
sum_draft = sum_waiting = sum_late = 0.0
if self.type in ('bank', 'cash'):
last_statement = self._get_last_bank_statement(
domain=[('move_id.state', '=', 'posted')])
last_balance = last_statement.balance_end
has_at_least_one_statement = bool(last_statement)
bank_account_balance, nb_lines_bank_account_balance = self._get_journal_bank_account_balance(
domain=[('move_id.state', '=', 'posted')])
outstanding_pay_account_balance, nb_lines_outstanding_pay_account_balance = self._get_journal_outstanding_payments_account_balance(
domain=[('move_id.state', '=', 'posted')])
self._cr.execute('''
SELECT COUNT(st_line.id)
FROM account_bank_statement_line st_line
JOIN account_move st_line_move ON st_line_move.id = st_line.move_id
JOIN account_bank_statement st ON st_line.statement_id = st.id
WHERE st_line_move.journal_id IN %s
AND st.state = 'posted'
AND NOT st_line.is_reconciled
''', [tuple(self.ids)])
number_to_reconcile = self.env.cr.fetchone()[0]
to_check_ids = self.to_check_ids()
number_to_check = len(to_check_ids)
to_check_balance = sum([r.amount for r in to_check_ids])
            #TODO need to check if all invoices are in the same currency as the journal!!!!
elif self.type in ['sale', 'purchase']:
title = _('Bills to pay') if self.type == 'purchase' else _('Invoices owed to you')
self.env['account.move'].flush(['amount_residual', 'currency_id', 'move_type', 'invoice_date', 'company_id', 'journal_id', 'date', 'state', 'payment_state'])
(query, query_args) = self._get_open_bills_to_pay_query()
self.env.cr.execute(query, query_args)
query_results_to_pay = self.env.cr.dictfetchall()
(query, query_args) = self._get_draft_bills_query()
self.env.cr.execute(query, query_args)
query_results_drafts = self.env.cr.dictfetchall()
today = fields.Date.context_today(self)
query = '''
SELECT
(CASE WHEN move_type IN ('out_refund', 'in_refund') THEN -1 ELSE 1 END) * amount_residual AS amount_total,
currency_id AS currency,
move_type,
invoice_date,
company_id
FROM account_move move
WHERE journal_id = %s
AND date <= %s
AND state = 'posted'
AND payment_state in ('not_paid', 'partial')
AND move_type IN ('out_invoice', 'out_refund', 'in_invoice', 'in_refund', 'out_receipt', 'in_receipt');
'''
self.env.cr.execute(query, (self.id, today))
late_query_results = self.env.cr.dictfetchall()
curr_cache = {}
(number_waiting, sum_waiting) = self._count_results_and_sum_amounts(query_results_to_pay, currency, curr_cache=curr_cache)
(number_draft, sum_draft) = self._count_results_and_sum_amounts(query_results_drafts, currency, curr_cache=curr_cache)
(number_late, sum_late) = self._count_results_and_sum_amounts(late_query_results, currency, curr_cache=curr_cache)
read = self.env['account.move'].read_group([('journal_id', '=', self.id), ('to_check', '=', True)], ['amount_total'], 'journal_id', lazy=False)
if read:
number_to_check = read[0]['__count']
to_check_balance = read[0]['amount_total']
elif self.type == 'general':
read = self.env['account.move'].read_group([('journal_id', '=', self.id), ('to_check', '=', True)], ['amount_total'], 'journal_id', lazy=False)
if read:
number_to_check = read[0]['__count']
to_check_balance = read[0]['amount_total']
is_sample_data = self.kanban_dashboard_graph and any(data.get('is_sample_data', False) for data in json.loads(self.kanban_dashboard_graph))
return {
'number_to_check': number_to_check,
'to_check_balance': formatLang(self.env, to_check_balance, currency_obj=currency),
'number_to_reconcile': number_to_reconcile,
'account_balance': formatLang(self.env, currency.round(bank_account_balance), currency_obj=currency),
'has_at_least_one_statement': has_at_least_one_statement,
'nb_lines_bank_account_balance': nb_lines_bank_account_balance,
'outstanding_pay_account_balance': formatLang(self.env, currency.round(outstanding_pay_account_balance), currency_obj=currency),
'nb_lines_outstanding_pay_account_balance': nb_lines_outstanding_pay_account_balance,
'last_balance': formatLang(self.env, currency.round(last_balance) + 0.0, currency_obj=currency),
'number_draft': number_draft,
'number_waiting': number_waiting,
'number_late': number_late,
'sum_draft': formatLang(self.env, currency.round(sum_draft) + 0.0, currency_obj=currency),
'sum_waiting': formatLang(self.env, currency.round(sum_waiting) + 0.0, currency_obj=currency),
'sum_late': formatLang(self.env, currency.round(sum_late) + 0.0, currency_obj=currency),
'currency_id': currency.id,
'bank_statements_source': self.bank_statements_source,
'title': title,
'is_sample_data': is_sample_data,
'company_count': len(self.env.companies)
}
def _get_open_bills_to_pay_query(self):
"""
Returns a tuple containing the SQL query used to gather the open bills
data as its first element, and the arguments dictionary to use to run
it as its second.
"""
return ('''
SELECT
(CASE WHEN move.move_type IN ('out_refund', 'in_refund') THEN -1 ELSE 1 END) * move.amount_residual AS amount_total,
move.currency_id AS currency,
move.move_type,
move.invoice_date,
move.company_id
FROM account_move move
WHERE move.journal_id = %(journal_id)s
AND move.state = 'posted'
AND move.payment_state in ('not_paid', 'partial')
AND move.move_type IN ('out_invoice', 'out_refund', 'in_invoice', 'in_refund', 'out_receipt', 'in_receipt');
''', {'journal_id': self.id})
def _get_draft_bills_query(self):
"""
Returns a tuple containing as its first element the SQL query used to
gather the bills in draft state data, and the arguments
dictionary to use to run it as its second.
"""
return ('''
SELECT
(CASE WHEN move.move_type IN ('out_refund', 'in_refund') THEN -1 ELSE 1 END) * move.amount_total AS amount_total,
move.currency_id AS currency,
move.move_type,
move.invoice_date,
move.company_id
FROM account_move move
WHERE move.journal_id = %(journal_id)s
AND move.state = 'draft'
AND move.payment_state in ('not_paid', 'partial')
AND move.move_type IN ('out_invoice', 'out_refund', 'in_invoice', 'in_refund', 'out_receipt', 'in_receipt');
''', {'journal_id': self.id})
def _count_results_and_sum_amounts(self, results_dict, target_currency, curr_cache=None):
""" Loops on a query result to count the total number of invoices and sum
their amount_total field (expressed in the given target currency).
        amount_total must be signed!
"""
rslt_count = 0
rslt_sum = 0.0
# Create a cache with currency rates to avoid unnecessary SQL requests. Do not copy
# curr_cache on purpose, so the dictionary is modified and can be re-used for subsequent
# calls of the method.
curr_cache = {} if curr_cache is None else curr_cache
for result in results_dict:
cur = self.env['res.currency'].browse(result.get('currency'))
company = self.env['res.company'].browse(result.get('company_id')) or self.env.company
rslt_count += 1
date = result.get('invoice_date') or fields.Date.context_today(self)
amount = result.get('amount_total', 0) or 0
if cur != target_currency:
key = (cur, target_currency, company, date)
# Using setdefault will call _get_conversion_rate, so we explicitly check the
# existence of the key in the cache instead.
if key not in curr_cache:
curr_cache[key] = self.env['res.currency']._get_conversion_rate(*key)
amount *= curr_cache[key]
rslt_sum += target_currency.round(amount)
return (rslt_count, rslt_sum)
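    # Illustrative: summing three EUR invoices against a USD journal hits
    # _get_conversion_rate once per distinct (currency, target, company,
    # date) tuple; rows sharing that key reuse the cached rate instead of
    # triggering another lookup.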
def action_create_new(self):
ctx = self._context.copy()
ctx['default_journal_id'] = self.id
if self.type == 'sale':
ctx['default_move_type'] = 'out_refund' if ctx.get('refund') else 'out_invoice'
elif self.type == 'purchase':
ctx['default_move_type'] = 'in_refund' if ctx.get('refund') else 'in_invoice'
else:
ctx['default_move_type'] = 'entry'
ctx['view_no_maturity'] = True
return {
'name': _('Create invoice/bill'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'account.move',
'view_id': self.env.ref('account.view_move_form').id,
'context': ctx,
}
def create_cash_statement(self):
ctx = self._context.copy()
ctx.update({'journal_id': self.id, 'default_journal_id': self.id, 'default_journal_type': 'cash'})
open_statements = self.env['account.bank.statement'].search([('journal_id', '=', self.id), ('state', '=', 'open')])
action = {
'name': _('Create cash statement'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'account.bank.statement',
'context': ctx,
}
if len(open_statements) == 1:
action.update({
'view_mode': 'form',
'res_id': open_statements.id,
})
elif len(open_statements) > 1:
action.update({
'view_mode': 'tree,form',
'domain': [('id', 'in', open_statements.ids)],
})
return action
def to_check_ids(self):
self.ensure_one()
domain = self.env['account.move.line']._get_suspense_moves_domain()
domain.append(('journal_id', '=', self.id))
statement_line_ids = self.env['account.move.line'].search(domain).mapped('statement_line_id')
return statement_line_ids
def _select_action_to_open(self):
self.ensure_one()
if self._context.get('action_name'):
return self._context.get('action_name')
elif self.type == 'bank':
return 'action_bank_statement_tree'
elif self.type == 'cash':
return 'action_view_bank_statement_tree'
elif self.type == 'sale':
return 'action_move_out_invoice_type'
elif self.type == 'purchase':
return 'action_move_in_invoice_type'
else:
return 'action_move_journal_line'
def open_action(self):
"""return action based on type for related journals"""
self.ensure_one()
action_name = self._select_action_to_open()
# Set 'account.' prefix if missing.
if not action_name.startswith("account."):
action_name = 'account.%s' % action_name
action = self.env["ir.actions.act_window"]._for_xml_id(action_name)
context = self._context.copy()
if 'context' in action and type(action['context']) == str:
context.update(ast.literal_eval(action['context']))
else:
context.update(action.get('context', {}))
action['context'] = context
action['context'].update({
'default_journal_id': self.id,
'search_default_journal_id': self.id,
})
domain_type_field = action['res_model'] == 'account.move.line' and 'move_id.move_type' or 'move_type' # The model can be either account.move or account.move.line
# Override the domain only if the action was not explicitly specified in order to keep the
# original action domain.
if not self._context.get('action_name'):
if self.type == 'sale':
action['domain'] = [(domain_type_field, 'in', ('out_invoice', 'out_refund', 'out_receipt'))]
elif self.type == 'purchase':
action['domain'] = [(domain_type_field, 'in', ('in_invoice', 'in_refund', 'in_receipt', 'entry'))]
return action
def open_spend_money(self):
return self.open_payments_action('outbound')
def open_collect_money(self):
return self.open_payments_action('inbound')
def open_transfer_money(self):
return self.open_payments_action('transfer')
def open_payments_action(self, payment_type, mode='tree'):
if payment_type == 'outbound':
action_ref = 'account.action_account_payments_payable'
elif payment_type == 'transfer':
action_ref = 'account.action_account_payments_transfer'
else:
action_ref = 'account.action_account_payments'
action = self.env['ir.actions.act_window']._for_xml_id(action_ref)
action['context'] = dict(ast.literal_eval(action.get('context')), default_journal_id=self.id, search_default_journal_id=self.id)
if payment_type == 'transfer':
action['context'].update({
'default_partner_id': self.company_id.partner_id.id,
'default_is_internal_transfer': True,
})
if mode == 'form':
action['views'] = [[False, 'form']]
return action
def open_action_with_context(self):
action_name = self.env.context.get('action_name', False)
if not action_name:
return False
ctx = dict(self.env.context, default_journal_id=self.id)
if ctx.get('search_default_journal', False):
ctx.update(search_default_journal_id=self.id)
ctx['search_default_journal'] = False # otherwise it will do a useless groupby in bank statements
ctx.pop('group_by', None)
action = self.env['ir.actions.act_window']._for_xml_id(f"account.{action_name}")
action['context'] = ctx
if ctx.get('use_domain', False):
action['domain'] = isinstance(ctx['use_domain'], list) and ctx['use_domain'] or ['|', ('journal_id', '=', self.id), ('journal_id', '=', False)]
action['name'] = _(
"%(action)s for journal %(journal)s",
action=action["name"],
journal=self.name,
)
return action
def create_bank_statement(self):
"""return action to create a bank statements. This button should be called only on journals with type =='bank'"""
action = self.env["ir.actions.actions"]._for_xml_id("account.action_bank_statement_tree")
action.update({
'views': [[False, 'form']],
'context': "{'default_journal_id': " + str(self.id) + "}",
})
return action
def create_customer_payment(self):
"""return action to create a customer payment"""
return self.open_payments_action('inbound', mode='form')
def create_supplier_payment(self):
"""return action to create a supplier payment"""
return self.open_payments_action('outbound', mode='form')
def create_internal_transfer(self):
"""return action to create a internal transfer"""
return self.open_payments_action('transfer', mode='form')
#####################
# Setup Steps Stuff #
#####################
def mark_bank_setup_as_done_action(self):
""" Marks the 'bank setup' step as done in the setup bar and in the company."""
self.company_id.sudo().set_onboarding_step_done('account_setup_bank_data_state')
def unmark_bank_setup_as_done_action(self):
""" Marks the 'bank setup' step as not done in the setup bar and in the company."""
self.company_id.account_setup_bank_data_state = 'not_done'
|
endticket/uwsgi
|
refs/heads/master
|
plugins/alarm_xmpp/uwsgiplugin.py
|
21
|
NAME = 'alarm_xmpp'
CFLAGS = []
LDFLAGS = []
LIBS = ['-lgloox']
GCC_LIST = ['alarm_xmpp_plugin', 'gloox.cc']
|
jeremyclover/airflow
|
refs/heads/master
|
airflow/jobs.py
|
2
|
from builtins import str
from past.builtins import basestring
from collections import defaultdict
from datetime import datetime
import getpass
import logging
import signal
import socket
import subprocess
import sys
from time import sleep
from sqlalchemy import Column, Integer, String, DateTime, func, Index
from sqlalchemy.orm.session import make_transient
from airflow import executors, models, settings, utils
from airflow.configuration import conf
from airflow.utils import AirflowException, State
Base = models.Base
ID_LEN = models.ID_LEN
# Setting up a statsd client if needed
statsd = None
if conf.getboolean('scheduler', 'statsd_on'):
from statsd import StatsClient
statsd = StatsClient(
host=conf.get('scheduler', 'statsd_host'),
port=conf.getint('scheduler', 'statsd_port'),
prefix=conf.get('scheduler', 'statsd_prefix'))
class BaseJob(Base):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
    a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.DEFAULT_EXECUTOR,
heartrate=conf.getint('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.gethostname()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
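        # A job counts as alive while its last heartbeat is more recent
        # than roughly two heartbeat intervals (2.1 leaves a small grace
        # margin for scheduling jitter).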
return (
(datetime.now() - self.latest_heartbeat).seconds <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
logging.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self):
pass
def heartbeat(self):
'''
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
        externally. This makes it possible to monitor at the system level
        what is actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
if job.state == State.SHUTDOWN:
self.kill()
if job.latest_heartbeat:
sleep_for = self.heartrate - (
datetime.now() - job.latest_heartbeat).total_seconds()
if sleep_for > 0:
sleep(sleep_for)
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
session.close()
self.heartbeat_callback()
logging.debug('[heart] Boom.')
def run(self):
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
if statsd:
statsd.incr(self.__class__.__name__.lower()+'_end', 1, 1)
def _execute(self):
        raise NotImplementedError("This method needs to be overridden")
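# A minimal sketch of how a BaseJob subclass is meant to hook in
# (illustrative only; NoopJob is a hypothetical name, not part of this
# module):
#
#     class NoopJob(BaseJob):
#         __mapper_args__ = {'polymorphic_identity': 'NoopJob'}
#         def _execute(self):
#             logging.info("nothing to do")
#
#     NoopJob().run()  # records RUNNING, then SUCCESS, in the `job` table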
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs indefinitely and constantly schedules the jobs
that are ready to run. It figures out the latest runs for each
    task and sees if the dependencies for the next schedules are met.
    If so, it triggers the task instance. It does this for each task
in each DAG and repeats.
:param dag_id: to run the scheduler for a single specific DAG
:type dag_id: string
:param subdir: to search for DAG under a certain folder only
:type subdir: string
:param test_mode: used for unit testing this class only, runs a single
schedule run
:type test_mode: bool
:param refresh_dags_every: force refresh the DAG definition every N
runs, as specified here
:type refresh_dags_every: int
:param do_pickle: to pickle the DAG object and send over to workers
for non-local executors
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
subdir=None,
test_mode=False,
refresh_dags_every=10,
num_runs=None,
do_pickle=False,
*args, **kwargs):
self.dag_id = dag_id
self.subdir = subdir
if test_mode:
self.num_runs = 1
else:
self.num_runs = num_runs
self.refresh_dags_every = refresh_dags_every
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
@utils.provide_session
def manage_slas(self, dag, session=None):
"""
        Finds all tasks that have SLAs defined and sends alert emails
        where needed. New SLA misses are also recorded in the database.
        We assume that the scheduler runs often, so we only check for
        tasks that should have succeeded in the past hour.
"""
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm += dag.schedule_interval
while dttm < datetime.now():
if dttm + task.sla + dag.schedule_interval < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm += dag.schedule_interval
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.email_sent == False)
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
blocking_tis = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
for ti in blocking_tis:
ti.task = dag.get_task(ti.task_id)
blocking_tis = ([ti for ti in blocking_tis
if ti.are_dependencies_met(main_session=session)])
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
from airflow import ascii
email_content = """\
            Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}\n{ascii.bug}</code></pre>
""".format(**locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
utils.send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
for sla in slas:
sla.email_sent = True
session.merge(sla)
session.commit()
session.close()
def import_errors(self, dagbag):
session = settings.Session()
session.query(models.ImportError).delete()
for filename, stacktrace in list(dagbag.import_errors.items()):
session.add(models.ImportError(
filename=filename, stacktrace=stacktrace))
session.commit()
def process_dag(self, dag, executor):
"""
This method schedules a single DAG by looking at the latest
run for each task and attempting to schedule the following run.
As multiple schedulers may be running for redundancy, this
function takes a lock on the DAG and timestamps the last run
in ``last_scheduler_run``.
"""
DagModel = models.DagModel
session = settings.Session()
# picklin'
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle_id = dag.pickle(session).id
db_dag = session.query(
DagModel).filter(DagModel.dag_id == dag.dag_id).first()
last_scheduler_run = db_dag.last_scheduler_run or datetime(2000, 1, 1)
secs_since_last = (
datetime.now() - last_scheduler_run).total_seconds()
# if db_dag.scheduler_lock or
if secs_since_last < self.heartrate:
session.commit()
session.close()
return None
else:
# Taking a lock
db_dag.scheduler_lock = True
db_dag.last_scheduler_run = datetime.now()
session.commit()
TI = models.TaskInstance
logging.info(
"Getting latest instance "
"for all tasks in dag " + dag.dag_id)
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.group_by(TI.task_id).subquery('sq')
)
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
)
logging.debug("Querying max dates for each task")
latest_ti = qry.all()
ti_dict = {ti.task_id: ti for ti in latest_ti}
session.expunge_all()
session.commit()
logging.debug("{} rows returned".format(len(latest_ti)))
for task in dag.tasks:
if task.adhoc:
continue
if task.task_id not in ti_dict:
# Brand new task, let's get started
ti = TI(task, task.start_date)
ti.refresh_from_db()
if ti.is_queueable(flag_upstream_failed=True):
logging.info(
'First run for {ti}'.format(**locals()))
executor.queue_task_instance(ti, pickle_id=pickle_id)
else:
ti = ti_dict[task.task_id]
ti.task = task # Hacky but worky
if ti.state == State.RUNNING:
continue # Only one task at a time
elif ti.state == State.UP_FOR_RETRY:
                    # If the task instance is up for retry, make sure
                    # the retry delay is met
if ti.is_runnable():
logging.debug('Triggering retry: ' + str(ti))
executor.queue_task_instance(ti, pickle_id=pickle_id)
elif ti.state == State.QUEUED:
                    # If it was queued, we skip it so that it gets
                    # prioritized in self.prioritize_queued
continue
else:
# Trying to run the next schedule
next_schedule = (
ti.execution_date + task.schedule_interval)
if (
ti.task.end_date and
next_schedule > ti.task.end_date):
continue
ti = TI(
task=task,
execution_date=next_schedule,
)
ti.refresh_from_db()
if ti.is_queueable(flag_upstream_failed=True):
logging.debug('Queuing next run: ' + str(ti))
executor.queue_task_instance(ti, pickle_id=pickle_id)
# Releasing the lock
logging.debug("Unlocking DAG (scheduler_lock)")
db_dag = (
session.query(DagModel)
.filter(DagModel.dag_id == dag.dag_id)
.first()
)
db_dag.scheduler_lock = False
session.merge(db_dag)
session.commit()
session.close()
@utils.provide_session
def prioritize_queued(self, session, executor, dagbag):
# Prioritizing queued task instances
pools = {p.pool: p for p in session.query(models.Pool).all()}
TI = models.TaskInstance
queued_tis = (
session.query(TI)
.filter(TI.state == State.QUEUED)
.all()
)
session.expunge_all()
d = defaultdict(list)
for ti in queued_tis:
if (
ti.dag_id not in dagbag.dags or not
dagbag.dags[ti.dag_id].has_task(ti.task_id)):
# Deleting queued jobs that don't exist anymore
session.delete(ti)
session.commit()
else:
d[ti.pool].append(ti)
for pool, tis in list(d.items()):
open_slots = pools[pool].open_slots(session=session)
if open_slots > 0:
tis = sorted(
tis, key=lambda ti: (-ti.priority_weight, ti.start_date))
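                # e.g. (illustrative) instances with priority_weights
                # [1, 3, 3] sort with the two weight-3 instances first,
                # the earlier start_date breaking that tie, then weight 1.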
for ti in tis[:open_slots]:
task = None
try:
task = dagbag.dags[ti.dag_id].get_task(ti.task_id)
except:
logging.error("Queued task {} seems gone".format(ti))
session.delete(ti)
if task:
ti.task = task
# picklin'
dag = dagbag.dags[ti.dag_id]
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle_id = dag.pickle(session).id
if ti.are_dependencies_met():
executor.queue_task_instance(ti, force=True, pickle_id=pickle_id)
else:
session.delete(ti)
session.commit()
def _execute(self):
dag_id = self.dag_id
def signal_handler(signum, frame):
logging.error("SIGINT (ctrl-c) received")
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
utils.pessimistic_connection_handling()
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting the scheduler")
dagbag = models.DagBag(self.subdir, sync_to_db=True)
executor = dagbag.executor
executor.start()
i = 0
while not self.num_runs or self.num_runs > i:
try:
loop_start_dttm = datetime.now()
try:
self.prioritize_queued(executor=executor, dagbag=dagbag)
except Exception as e:
logging.exception(e)
i += 1
try:
if i % self.refresh_dags_every == 0:
dagbag = models.DagBag(self.subdir, sync_to_db=True)
else:
dagbag.collect_dags(only_if_updated=True)
except:
logging.error("Failed at reloading the dagbag")
if statsd:
statsd.incr('dag_refresh_error', 1, 1)
sleep(5)
if dag_id:
dags = [dagbag.dags[dag_id]]
else:
dags = [
dag for dag in dagbag.dags.values() if not dag.parent_dag]
paused_dag_ids = dagbag.paused_dags()
for dag in dags:
logging.debug("Scheduling {}".format(dag.dag_id))
dag = dagbag.get_dag(dag.dag_id)
if not dag or (dag.dag_id in paused_dag_ids):
continue
try:
self.process_dag(dag, executor)
self.manage_slas(dag)
except Exception as e:
logging.exception(e)
logging.info(
"Done queuing tasks, calling the executor's heartbeat")
duration_sec = (datetime.now() - loop_start_dttm).total_seconds()
logging.info("Loop took: {} seconds".format(duration_sec))
try:
self.import_errors(dagbag)
except Exception as e:
logging.exception(e)
try:
# We really just want the scheduler to never ever stop.
executor.heartbeat()
self.heartbeat()
except Exception as e:
logging.exception(e)
logging.error("Tachycardia!")
except Exception as deep_e:
logging.exception(deep_e)
def heartbeat_callback(self):
if statsd:
statsd.gauge('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order and lasts for
    as long as it takes for the set of task instances to be completed.
"""
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
def __init__(
self,
dag, start_date=None, end_date=None, mark_success=False,
include_adhoc=False,
donot_pickle=False,
ignore_dependencies=False,
*args, **kwargs):
self.dag = dag
dag.override_start_date(start_date)
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.include_adhoc = include_adhoc
self.donot_pickle = donot_pickle
self.ignore_dependencies = ignore_dependencies
super(BackfillJob, self).__init__(*args, **kwargs)
def _execute(self):
"""
Runs a dag for a specified date range.
"""
session = settings.Session()
start_date = self.bf_start_date
end_date = self.bf_end_date
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
# Build a list of all instances to run
tasks_to_run = {}
failed = []
succeeded = []
started = []
wont_run = []
for task in self.dag.tasks:
if (not self.include_adhoc) and task.adhoc:
continue
start_date = start_date or task.start_date
end_date = end_date or task.end_date or datetime.now()
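            # e.g. (illustrative) a daily schedule_interval with
            # start_date 2015-01-01 and end_date 2015-01-03 yields one
            # task instance per day: the 1st, the 2nd and the 3rd.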
for dttm in utils.date_range(
start_date, end_date, task.dag.schedule_interval):
ti = models.TaskInstance(task, dttm)
tasks_to_run[ti.key] = ti
# Triggering what is ready to get triggered
while tasks_to_run:
for key, ti in list(tasks_to_run.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS and key in tasks_to_run:
succeeded.append(key)
del tasks_to_run[key]
elif ti.is_runnable():
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
task_start_date=self.bf_start_date,
pickle_id=pickle_id,
ignore_dependencies=self.ignore_dependencies)
ti.state = State.RUNNING
if key not in started:
started.append(key)
self.heartbeat()
executor.heartbeat()
# Reacting to events
for key, state in list(executor.get_event_buffer().items()):
dag_id, task_id, execution_date = key
if key not in tasks_to_run:
continue
ti = tasks_to_run[key]
ti.refresh_from_db()
if ti.state in (State.FAILED, State.SKIPPED):
if ti.state == State.FAILED:
failed.append(key)
logging.error("Task instance " + str(key) + " failed")
elif ti.state == State.SKIPPED:
wont_run.append(key)
logging.error("Skipping " + str(key) + " failed")
del tasks_to_run[key]
# Removing downstream tasks that also shouldn't run
for t in self.dag.get_task(task_id).get_flat_relatives(
upstream=False):
key = (ti.dag_id, t.task_id, execution_date)
if key in tasks_to_run:
wont_run.append(key)
del tasks_to_run[key]
elif ti.state == State.SUCCESS:
succeeded.append(key)
del tasks_to_run[key]
msg = (
"[backfill progress] "
"waiting: {0} | "
"succeeded: {1} | "
"kicked_off: {2} | "
"failed: {3} | "
"wont_run: {4} ").format(
len(tasks_to_run),
len(succeeded),
len(started),
len(failed),
len(wont_run))
logging.info(msg)
executor.end()
session.close()
if failed:
raise AirflowException(
"Some tasks instances failed, here's the list:\n"+str(failed))
logging.info("All done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_dependencies=False,
force=False,
mark_success=False,
pickle_id=None,
task_start_date=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_dependencies = ignore_dependencies
self.force = force
self.pickle_id = pickle_id
self.mark_success = mark_success
self.task_start_date = task_start_date
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
command = self.task_instance.command(
raw=True,
ignore_dependencies=self.ignore_dependencies,
force=self.force,
pickle_id=self.pickle_id,
mark_success=self.mark_success,
task_start_date=self.task_start_date,
job_id=self.id,
)
self.process = subprocess.Popen(['bash', '-c', command])
return_code = None
while return_code is None:
self.heartbeat()
return_code = self.process.poll()
def on_kill(self):
self.process.terminate()
|
qzio/compose
|
refs/heads/master
|
tests/unit/split_buffer_test.py
|
54
|
from __future__ import unicode_literals
from __future__ import absolute_import
from compose.cli.utils import split_buffer
from .. import unittest
class SplitBufferTest(unittest.TestCase):
def test_single_line_chunks(self):
def reader():
yield b'abc\n'
yield b'def\n'
yield b'ghi\n'
self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi\n'])
def test_no_end_separator(self):
def reader():
yield b'abc\n'
yield b'def\n'
yield b'ghi'
self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi'])
def test_multiple_line_chunk(self):
def reader():
yield b'abc\ndef\nghi'
self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi'])
def test_chunked_line(self):
def reader():
yield b'a'
yield b'b'
yield b'c'
yield b'\n'
yield b'd'
self.assert_produces(reader, [b'abc\n', b'd'])
def test_preserves_unicode_sequences_within_lines(self):
string = u"a\u2022c\n".encode('utf-8')
def reader():
yield string
self.assert_produces(reader, [string])
def assert_produces(self, reader, expectations):
split = split_buffer(reader(), b'\n')
for (actual, expected) in zip(split, expectations):
self.assertEqual(type(actual), type(expected))
self.assertEqual(actual, expected)
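# For reference, a direct usage sketch of split_buffer under the same
# assumptions the tests above encode about its signature (an iterator of
# byte chunks plus a separator):
#
#     list(split_buffer(iter([b'ab\ncd']), b'\n'))  # -> [b'ab\n', b'cd']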
|
macs03/demo-cms
|
refs/heads/master
|
cms/lib/python2.7/site-packages/cms/migrations/0014_sites_removed.py
|
525
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Dummy migration
pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
rananda/cfme_tests
|
refs/heads/master
|
cfme/tests/cloud_infra_common/test_events.py
|
1
|
# -*- coding: utf-8 -*-
"""This module tests events that are invoked by Cloud/Infra VMs."""
import fauxfactory
import pytest
from cfme.common.vm import VM
from cfme.control.explorer.policy_profiles import PolicyProfile
from cfme.control.explorer.policies import VMControlPolicy
from cfme.control.explorer.actions import Action
from utils import testgen
from utils.appliance import get_or_create_current_appliance
from utils.events import EventBuilder
from utils.wait import wait_for
pytestmark = [
pytest.mark.usefixtures('uses_infra_providers', 'uses_cloud_providers'),
pytest.mark.tier(2)
]
def pytest_generate_tests(metafunc):
argnames, argvalues, idlist = testgen.all_providers(metafunc, required_fields=['provisioning'])
testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope="module")
@pytest.yield_fixture(scope="function")
def vm_crud(provider, setup_provider_modscope, small_template_modscope):
vm = VM.factory(
'test_events_{}'.format(fauxfactory.gen_alpha(length=8).lower()),
provider,
template_name=small_template_modscope)
yield vm
if vm.does_vm_exist_on_provider():
vm.delete_from_provider()
@pytest.mark.meta(blockers=[1238371], automates=[1238371])
def test_vm_create(request, vm_crud, provider, register_event):
""" Test whether vm_create_complete event is emitted.
    Prerequisites:
* A provider that is set up and able to deploy VMs
Steps:
        * Create a Control setup (action, policy, profile) that applies a tag
          on a VM when the ``VM Create Complete`` event comes
* Deploy the VM outside of CFME (directly in the provider)
* Refresh provider relationships and wait for VM to appear
* Assert the tag appears.
Metadata:
test_flag: provision
"""
action = Action(
fauxfactory.gen_alpha(),
"Tag",
dict(tag=("My Company Tags", "Environment", "Development")))
action.create()
request.addfinalizer(action.delete)
policy = VMControlPolicy(fauxfactory.gen_alpha())
policy.create()
request.addfinalizer(policy.delete)
policy.assign_events("VM Create Complete")
request.addfinalizer(policy.assign_events)
policy.assign_actions_to_event("VM Create Complete", action)
profile = PolicyProfile(fauxfactory.gen_alpha(), policies=[policy])
profile.create()
request.addfinalizer(profile.delete)
provider.assign_policy_profiles(profile.description)
request.addfinalizer(lambda: provider.unassign_policy_profiles(profile.description))
event = EventBuilder(get_or_create_current_appliance()).new_event(target_type='VmOrTemplate',
target_name=vm_crud.name,
event_type='vm_create')
register_event(event)
vm_crud.create_on_provider()
provider.refresh_provider_relationships()
vm_crud.wait_to_appear()
def _check():
return any(tag.category.display_name == "Environment" and tag.display_name == "Development"
for tag in vm_crud.get_tags())
wait_for(_check, num_sec=300, delay=15, message="tags to appear")
|
cosimoiaia/AlviBot
|
refs/heads/master
|
MailReader.py
|
2
|
#!/usr/bin/env python
###########################
#
# A.L.V.I. Bot
#
# MailReader.py: Handles IMAP/POP connections, downloads messages and sends vocal notifications
#
# Author: Cosimo Iaia <cosimo.iaia@gmail.com>
# Date: 12/02/2010
#
# This file is distributed under the terms of the GNU General Public License.
# Copyright 2010 Cosimo Iaia
#
#
###########################
import email
import logging
import alviUtils
class MailReader:
def __init__(self, name, server, protocol, ssl, username, password, lastid, say):
self.name=name
self.username=username
self.passwd=password
self.address = server
self.say = say
self.log = logging.getLogger("Alvi")
self.lastid=str(lastid)
self.proto=protocol
self.ssl=ssl
def getLastId(self):
return self.lastid
def getName(self):
return self.name
def read(self):
try:
if self.proto == 'IMAP':
return self._readImap()
else:
return self._readPop()
except:
pass
    def _readPop(self):
        """
        import poplib
        if self.ssl:
            conn = poplib.POP3_SSL(self.address)
        else:
            conn = poplib.POP3(self.address)
        conn.user(self.username)
        conn.pass_(self.passwd)
        numMessages = len(conn.list()[1])
        """
        pass
def _readImap(self):
import imaplib
try:
if self.ssl:
conn= imaplib.IMAP4_SSL(self.address)
else:
conn= imaplib.IMAP4(self.address)
conn.login(self.username, self.passwd)
except:
message = "I am unable to connect to " +self.name
self.log.warning(message)
self.say(message)
raise RuntimeError, "Cannot connect to server"
code = conn.select('INBOX')[0]
if code != 'OK':
message="ERROR: Mailbox not found!!"
self.say(message)
self.log.warning(message)
raise RuntimeError, "Failed to select inbox"
code, data= conn.search(None, 'ALL')
if code == 'OK':
msgid_list= data[0].split()
else:
message="ERROR: messages DDI not found"
self.say(message)
self.log.warning(message)
raise RuntimeError, "Failed to get message IDs"
        if self.lastid not in msgid_list:
lastidx = len(msgid_list)-3
else:
lastidx = msgid_list.index(self.lastid)+1
newids=msgid_list[lastidx:]
newids.reverse()
if len(newids) > 0:
self.lastid = newids[0]
greetings="su "+self.name+"."
self.say(greetings)
for id in newids:
code, data = conn.fetch(id, '(RFC822.HEADER)')
if code == 'OK':
mail = email.message_from_string(data[0][1])
From_ = mail.get('From').split(' ')
if(len(From_)>1):
                    # well-formed From, i.e.: sender <sender@mail.com>
From = ''
for s in From_[:-1]: From += s # too ugly ?
#From = From.replace('"', '')
From = alviUtils.str_to_hlf(From)
else:
                    # bare From, i.e.: <sender@mail.com>
From = alviUtils.str_to_hlf(From_[0])
digest = From + " writes you: " + alviUtils.str_to_hlf(mail.get('Subject'))
self.log.info(self.name+': '+digest)
self.say(digest)
else:
message="ERROR: could not retrieve msg"
self.log.warning(message)
                self.say(message)
raise RuntimeError, "could not retrieve msg"
conn.close()
conn.logout()
return self.lastid
if __name__ == "__main__":
import config
import festival,os
say = festival.open().say
conf = config.config(os.path.expanduser('~/.alvi.cfg'))
conf.load()
params = conf.getConf('Mail')
print params
    # 'Mail' supplies the missing name argument (assumed from the config section)
    m = MailReader('Mail', params['mail_server'], 'IMAP', True, params['username'], params['password'], params['lastid'], say)
lastid = m.read()
print lastid
|
felipenaselva/felipe.repository
|
refs/heads/master
|
script.module.myconnpy/lib/examples/prepared_statements.py
|
14
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Example using MySQL Prepared Statements
Example using MySQL Connector/Python showing:
* usage of Prepared Statements
"""
from __future__ import print_function
import mysql.connector
from mysql.connector.cursor import MySQLCursorPrepared
def main(config):
output = []
cnx = mysql.connector.Connect(**config)
curprep = cnx.cursor(cursor_class=MySQLCursorPrepared)
cur = cnx.cursor()
# Drop table if exists, and create it new
stmt_drop = "DROP TABLE IF EXISTS names"
cur.execute(stmt_drop)
stmt_create = (
"CREATE TABLE names ("
"id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT, "
"name VARCHAR(30) DEFAULT '' NOT NULL, "
"cnt TINYINT UNSIGNED DEFAULT 0, "
"PRIMARY KEY (id))"
)
cur.execute(stmt_create)
# Connector/Python also allows ? as placeholders for MySQL Prepared
# statements.
prepstmt = "INSERT INTO names (name) VALUES (%s)"
    # Preparing the statement is done only once. It can be done up front
    # without data, or implicitly later when data is first passed.
curprep.execute(prepstmt)
# Insert 3 records
names = ('Geert', 'Jan', 'Michel')
for name in names:
curprep.execute(prepstmt, (name,))
cnx.commit()
    # We use a normal cursor to issue a SELECT
output.append("Inserted data")
cur.execute("SELECT id, name FROM names")
for row in cur:
output.append("%d | %s" % (row[0], row[1]))
# Cleaning up, dropping the table again
cur.execute(stmt_drop)
cnx.close()
return output
if __name__ == '__main__':
#
# Configure MySQL login and database to use in config.py
#
from config import Config
config = Config.dbinfo().copy()
out = main(config)
print('\n'.join(out))
|
jralls/gramps
|
refs/heads/master
|
gramps/cli/clidbman.py
|
1
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2009 Brian G. Matherly
# Copyright (C) 2009 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Provide the management of databases from CLI. This includes opening, renaming,
creating, and deleting of databases.
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import re
import os
import sys
import ast
import time
from urllib.parse import urlparse
from urllib.request import urlopen, url2pathname
import tempfile
import logging
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.plug import BasePluginManager
from gramps.gen.config import config
from gramps.gen.constfunc import win
from gramps.gen.db.dbconst import DBLOGNAME
from gramps.gen.db.utils import make_database
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
LOG = logging.getLogger(".clidbman")
_LOG = logging.getLogger(DBLOGNAME)
#-------------------------------------------------------------------------
#
# constants
#
#-------------------------------------------------------------------------
DEFAULT_TITLE = _("Family Tree")
NAME_FILE = "name.txt"
META_NAME = "meta_data.db"
#-------------------------------------------------------------------------
#
# functions
#
#-------------------------------------------------------------------------
def _errordialog(title, errormessage):
"""
    Show the error, given a title and an error message.
"""
print(_('ERROR: %(title)s \n %(message)s') % {
'title': title,
'message': errormessage})
sys.exit()
#-------------------------------------------------------------------------
#
# CLIDbManager
#
#-------------------------------------------------------------------------
class CLIDbManager:
"""
Database manager without GTK functionality, allows users to create and
open databases
"""
IND_NAME = 0
IND_PATH = 1
IND_PATH_NAMEFILE = 2
IND_TVAL_STR = 3
IND_TVAL = 4
IND_USE_ICON_BOOL = 5
IND_STOCK_ID = 6
ICON_NONE = 0
ICON_RECOVERY = 1
ICON_LOCK = 2
ICON_OPEN = 3
ICON_MAP = {
ICON_NONE : None,
ICON_RECOVERY : None,
ICON_LOCK : None,
ICON_OPEN : None,
}
ERROR = _errordialog
def __init__(self, dbstate):
self.dbstate = dbstate
self.msg = None
if dbstate and dbstate.is_open():
self.active = dbstate.db.get_save_path()
else:
self.active = None
self.current_names = []
if dbstate:
self._populate_cli()
def empty(self, val):
"""
Callback that does nothing
"""
pass
def get_dbdir_summary(self, dirpath, name):
"""
dirpath: full path to database
name: proper name of family tree
Returns dictionary of summary item.
Should include at least, if possible:
_("Path")
_("Family Tree")
_("Last accessed")
_("Database")
_("Locked?")
and these details:
_("Number of people")
_("Version")
_("Schema version")
"""
dbid = "bsddb"
dbid_path = os.path.join(dirpath, "database.txt")
if os.path.isfile(dbid_path):
with open(dbid_path) as file:
dbid = file.read().strip()
if not self.is_locked(dirpath):
try:
database = make_database(dbid)
database.load(dirpath, None, update=False)
retval = database.get_summary()
database.close(update=False)
except Exception as msg:
retval = {_("Unavailable"): str(msg)[:74] + "..."}
else:
retval = {_("Unavailable"): "locked"}
retval.update({_("Family Tree"): name,
_("Path"): dirpath,
_("Database"): self.get_backend_name_from_dbid(dbid),
_("Last accessed"): time_val(dirpath)[1],
_("Locked?"): self.is_locked(dirpath),
})
return retval
def get_backend_name_from_dbid(self, dbid):
pmgr = BasePluginManager.get_instance()
for plugin in pmgr.get_reg_databases():
if plugin.id == dbid:
return plugin._name
return _("Unknown")
def print_family_tree_summaries(self, database_names=None):
"""
Prints a detailed list of the known family trees.
"""
print(_('Gramps Family Trees:'))
for item in self.current_names:
(name, dirpath, path_name, last,
tval, enable, stock_id, backend_type) = item
if (database_names is None or
any([(re.match("^" + dbname + "$", name) or
dbname == name)
for dbname in database_names])):
summary = self.get_dbdir_summary(dirpath, name)
print(_('Family Tree "%s":') % summary[_("Family Tree")])
for item in sorted(summary):
if item != "Family Tree":
# translators: needed for French, ignore otherwise
print(' ' + _("%(str1)s: %(str2)s"
) % {'str1' : item,
'str2' : summary[item]})
def family_tree_summary(self, database_names=None):
"""
Return a list of dictionaries of the known family trees.
"""
# make the default directory if it does not exist
summary_list = []
for item in self.current_names:
(name, dirpath, path_name, last,
tval, enable, stock_id, backend_type) = item
if (database_names is None or
any([(re.match("^" + dbname + "$", name) or
dbname == name)
for dbname in database_names])):
retval = self.get_dbdir_summary(dirpath, name)
summary_list.append(retval)
return summary_list
def _populate_cli(self):
"""
Get the list of current names in the database dir
"""
# make the default directory if it does not exist
dbdir = os.path.expanduser(config.get('database.path'))
db_ok = make_dbdir(dbdir)
self.current_names = []
if db_ok:
for dpath in os.listdir(dbdir):
dirpath = os.path.join(dbdir, dpath)
path_name = os.path.join(dirpath, NAME_FILE)
try:
with open(os.path.join(dirpath, "database.txt")) as file:
backend_type = file.read()
except:
backend_type = "bsddb"
if os.path.isfile(path_name):
with open(path_name, 'r', encoding='utf8') as file:
name = file.readline().strip()
(tval, last) = time_val(dirpath)
(enable, stock_id) = self.icon_values(
dirpath, self.active, self.dbstate.is_open())
if stock_id == 'gramps-lock':
last = find_locker_name(dirpath)
self.current_names.append(
(name, os.path.join(dbdir, dpath), path_name,
last, tval, enable, stock_id, backend_type))
self.current_names.sort()
def get_family_tree_path(self, name):
"""
        Given a name, return None if the name does not exist, or the path
        to the database if it is a known database name.
"""
for data in self.current_names:
if data[0] == name:
return data[1]
return None
def family_tree_list(self):
"""
Return a list of name, dirname of the known family trees
"""
lst = [(x[0], x[1]) for x in self.current_names]
return lst
def __start_cursor(self, msg):
"""
        Do the things needed to start an import visually, e.g. a busy cursor
"""
print(_('Starting Import, %s') % msg)
def __end_cursor(self):
"""
Set end of a busy cursor
"""
print(_('Import finished...'))
def create_new_db_cli(self, title=None, create_db=True, dbid=None):
"""
Create a new database.
"""
new_path = find_next_db_dir()
os.mkdir(new_path)
path_name = os.path.join(new_path, NAME_FILE)
if title is None:
name_list = [name[0] for name in self.current_names]
title = find_next_db_name(name_list)
with open(path_name, "w", encoding='utf8') as name_file:
name_file.write(title)
if create_db:
# write the version number into metadata
if dbid is None:
dbid = "bsddb"
newdb = make_database(dbid)
newdb.write_version(new_path)
(tval, last) = time_val(new_path)
self.current_names.append((title, new_path, path_name,
last, tval, False, "", dbid))
return new_path, title
def _create_new_db(self, title=None, dbid=None, edit_entry=False):
"""
Create a new database, do extra stuff needed
"""
return self.create_new_db_cli(title, dbid=dbid)
def import_new_db(self, filename, user):
"""
Attempt to import the provided file into a new database.
A new database will only be created if an appropriate importer was
found.
:param filename: a fully-qualified path, filename, and
extension to open.
:param user: a :class:`.cli.user.User` or :class:`.gui.user.User`
instance for managing user interaction.
:returns: A tuple of (new_path, name) for the new database
or (None, None) if no import was performed.
"""
pmgr = BasePluginManager.get_instance()
# check to see if it isn't a filename directly:
if not os.path.isfile(filename):
# Allow URL names here; make temp file if necessary
url = urlparse(filename)
if url.scheme != "":
if url.scheme == "file":
filename = url2pathname(filename[7:])
else:
url_fp = urlopen(filename) # open URL
# make a temp local file:
ext = os.path.splitext(url.path)[1]
fd, filename = tempfile.mkstemp(suffix=ext)
temp_fp = os.fdopen(fd, "w")
# read from URL:
data = url_fp.read()
# write locally:
temp_fp.write(data)
url_fp.close()
from gramps.gen.db.dbconst import BDBVERSFN
# name not set
(name, ext) = os.path.splitext(os.path.basename(filename))
versionpath = os.path.join(name, BDBVERSFN)
# dbase not set
dbase = make_database("bsddb")
_LOG.debug("Write bsddb version %s", str(dbase.version()))
with open(versionpath, "w") as version_file:
version_file.write(str(dbase.version()))
temp_fp.close()
(name, ext) = os.path.splitext(os.path.basename(filename))
format = ext[1:].lower()
for plugin in pmgr.get_import_plugins():
if format == plugin.get_extension():
new_path, name = self._create_new_db(name, edit_entry=False)
# Create a new database
self.__start_cursor(_("Importing data..."))
## Use bsddb, for now, because we assumed that above.
dbid = "bsddb" ## config.get('database.backend')
dbase = make_database(dbid)
dbase.load(new_path, user.callback)
import_function = plugin.get_import_function()
import_function(dbase, filename, user)
# finish up
self.__end_cursor()
dbase.close()
return new_path, name
return None, None
def is_locked(self, dbpath):
"""
Returns True if there is a lock file in the dirpath
"""
if os.path.isfile(os.path.join(dbpath, "lock")):
return True
return False
def needs_recovery(self, dbpath):
"""
Returns True if the database in dirpath needs recovery
"""
if os.path.isfile(os.path.join(dbpath, "need_recover")):
return True
return False
def remove_database(self, dbname, user=None):
"""
        Deletes a database folder given a pattern that matches
        its proper name.
"""
dbdir = os.path.expanduser(config.get('database.path'))
match_list = []
for dpath in os.listdir(dbdir):
dirpath = os.path.join(dbdir, dpath)
path_name = os.path.join(dirpath, NAME_FILE)
if os.path.isfile(path_name):
with open(path_name, 'r', encoding='utf8') as file:
name = file.readline().strip()
if re.match("^" + dbname + "$", name) or dbname == name:
match_list.append((name, dirpath))
if len(match_list) == 0:
CLIDbManager.ERROR("Family tree not found",
"No matching family tree found: '%s'" % dbname)
# now delete them:
for (name, directory) in match_list:
if user is None or user.prompt(
_('Remove family tree warning'),
_('Are you sure you want to remove '
'the family tree named\n"%s"?'
) % name,
_('yes'), _('no'), default_label=_('no')):
try:
for (top, dirs, files) in os.walk(directory):
for filename in files:
os.unlink(os.path.join(top, filename))
os.rmdir(directory)
except (IOError, OSError) as msg:
CLIDbManager.ERROR(_("Could not delete Family Tree"),
str(msg))
def rename_database(self, filepath, new_text):
"""
Renames the database by writing the new value to the name.txt file
Returns old_name, new_name if success, None, None if no success
"""
try:
with open(filepath, "r", encoding='utf8') as name_file:
old_text = name_file.read()
with open(filepath, "w", encoding='utf8') as name_file:
name_file.write(new_text)
except (OSError, IOError) as msg:
CLIDbManager.ERROR(_("Could not rename Family Tree"), str(msg))
return None, None
return old_text, new_text
def break_lock(self, dbpath):
"""
Breaks the lock on a database
"""
if os.path.exists(os.path.join(dbpath, "lock")):
os.unlink(os.path.join(dbpath, "lock"))
def icon_values(self, dirpath, active, is_open):
"""
If the directory path is the active path, then return values
that indicate to use the icon, and which icon to use.
"""
if os.path.isfile(os.path.join(dirpath, "need_recover")):
return (True, self.ICON_MAP[self.ICON_RECOVERY])
elif dirpath == active and is_open:
return (True, self.ICON_MAP[self.ICON_OPEN])
elif os.path.isfile(os.path.join(dirpath, "lock")):
return (True, self.ICON_MAP[self.ICON_LOCK])
else:
return (False, self.ICON_MAP[self.ICON_NONE])
def make_dbdir(dbdir):
"""
Create the default database directory, as defined by dbdir
"""
try:
if not os.path.isdir(dbdir):
os.makedirs(dbdir)
except (IOError, OSError) as msg:
LOG.error(_("\nERROR: Wrong database path in Edit Menu->Preferences.\n"
"Open preferences and set correct database path.\n\n"
"Details: Could not make database directory:\n %s\n\n"),
str(msg))
return False
return True
def find_next_db_name(name_list):
"""
Scan the name list, looking for names that do not yet exist.
Use the DEFAULT_TITLE as the basis for the database name.
"""
i = 1
while True:
title = "%s %d" % (DEFAULT_TITLE, i)
if title not in name_list:
return title
i += 1
def find_next_db_dir():
"""
Searches the default directory for the first available default
    database name. Base the name on the current time. In practice,
    the first candidate should be valid.
"""
while True:
base = "%x" % int(time.time())
dbdir = os.path.expanduser(config.get('database.path'))
new_path = os.path.join(dbdir, base)
if not os.path.isdir(new_path):
break
return new_path
def time_val(dirpath):
"""
Return the last modified time of the database. We do this by looking
at the modification time of the meta db file. If this file does not
exist, we indicate that database as never modified.
"""
meta = os.path.join(dirpath, META_NAME)
if os.path.isfile(meta):
tval = os.stat(meta)[9]
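        # stat tuple index 9 is st_ctime; index 8 (used below on Windows)
        # is st_mtime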
# This gives creation date in Windows, but correct date in Linux
if win():
# Try to use last modified date instead in Windows
# and check that it is later than the creation date.
tval_mod = os.stat(meta)[8]
if tval_mod > tval:
tval = tval_mod
last = time.strftime('%x %X', time.localtime(tval))
else:
tval = 0
last = _("Never")
return (tval, last)
def find_locker_name(dirpath):
"""
    Opens the lock file if it exists, reads the contents (a username)
    and returns them, with the correct string prepended so the message
    can be printed with the correct locale.
    If the file cannot be read, we return 'Unknown'.
    This data can e.g. be displayed in the time column of the manager.
"""
try:
fname = os.path.join(dirpath, "lock")
with open(fname, 'r', encoding='utf8') as ifile:
username = ifile.read().strip()
# feature request 2356: avoid genitive form
last = _("Locked by %s") % username
except (OSError, IOError, UnicodeDecodeError):
last = _("Unknown")
return last
|
daonb/Open-Knesset
|
refs/heads/master
|
laws/migrations/0021_add_full_title.py
|
14
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for b in orm['laws.bill'].objects.all():
b.full_title = "%s %s" % (b.law.title, b.title)
b.save()
def backwards(self, orm):
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'committees.committee': {
'Meta': {'object_name': 'Committee'},
'chairpersons': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'chaired_committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'replacements': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'replacing_in_committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"})
},
'committees.committeemeeting': {
'Meta': {'ordering': "('-date',)", 'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': "orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'what': ('django.db.models.fields.TextField', [], {}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'where': ('django.db.models.fields.TextField', [], {}),
'which_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'which_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_for_event'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'who': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['persons.Person']", 'symmetrical': 'False'})
},
'laws.bill': {
'Meta': {'object_name': 'Bill'},
'approval_vote': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'bill_approved'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Vote']"}),
'first_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'first_vote': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'to': "orm['laws.Vote']"}),
'full_title': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['laws.Law']"}),
'popular_name': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'popular_name_slug': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'pre_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_pre_votes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'second_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_second'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000', 'db_index': 'True'}),
'stage': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'stage_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.govlegislationcommitteedecision': {
'Meta': {'object_name': 'GovLegislationCommitteeDecision'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'gov_decisions'", 'null': 'True', 'to': "orm['laws.Bill']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'stand': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.govproposal': {
'Meta': {'object_name': 'GovProposal'},
'bill': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'gov_proposal'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Bill']"}),
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_govproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_govproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_govproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.knessetproposal': {
'Meta': {'object_name': 'KnessetProposal'},
'bill': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'knesset_proposal'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Bill']"}),
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['committees.Committee']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'originals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'knesset_proposals'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.PrivateProposal']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.law': {
'Meta': {'object_name': 'Law'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_into': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'duplicates'", 'null': 'True', 'to': "orm['laws.Law']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.membervotingstatistics': {
'Meta': {'object_name': 'MemberVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Member']"})
},
'laws.partyvotingstatistics': {
'Meta': {'object_name': 'PartyVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Party']"})
},
'laws.privateproposal': {
'Meta': {'object_name': 'PrivateProposal'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proposals'", 'null': 'True', 'to': "orm['laws.Bill']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'proposal_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_proposed'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.vote': {
'Meta': {'ordering': "('-time',)", 'object_name': 'Vote'},
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
},
'mks.member': {
'Meta': {'ordering': "['name']", 'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'ordering': "('-number_of_seats',)", 'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'persons.person': {
'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['persons.Title']"})
},
'persons.title': {
'Meta': {'object_name': 'Title'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'planet.blog': {
'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
},
'tagging.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['laws']
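# A South data migration like this is applied with the usual migrate command,
# e.g. (hypothetical project invocation):
#     ./manage.py migrate laws 0021_add_full_title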
|
uw-it-cte/uw-restclients
|
refs/heads/ems-master
|
restclients/test/pws/err500/pws.py
|
5
|
from django.test import TestCase
from django.conf import settings
from restclients.pws import PWS
from restclients.exceptions import DataFailureException
class PWSTest500(TestCase):
def test_pws_regid_500(self):
with self.settings(RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.errors.Always500'):
pws = PWS()
self.assertRaises(DataFailureException, pws.get_person_by_regid, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
try:
pws.get_person_by_regid("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
self.fail("This needs to be an exception")
except DataFailureException as ex:
self.assertEqual(ex.status, 500, "Exception has the right status")
self.assertEqual(ex.url, "/identity/v1/person/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/full.json", "Exception has the right url")
def test_pws_netid_500(self):
with self.settings(RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.errors.Always500'):
pws = PWS()
self.assertRaises(DataFailureException, pws.get_person_by_netid, "fake")
try:
pws.get_person_by_netid("fake")
self.fail("This needs to be an exception")
except DataFailureException as ex:
self.assertEqual(ex.status, 500, "Exception has the right status")
self.assertEqual(ex.url, "/identity/v1/person/fake/full.json", "Exception has the right url")
|
sushramesh/lwc
|
refs/heads/master
|
lib/python2.7/site-packages/setuptools/command/upload_docs.py
|
390
|
# -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
from distutils.command.upload import upload
import os
import socket
import zipfile
import tempfile
import sys
import shutil
from setuptools.compat import httplib, urlparse, unicode, iteritems, PY3
from pkg_resources import iter_entry_points
errors = 'surrogateescape' if PY3 else 'strict'
# This is not just a replacement for byte literals
# but works as a general purpose encoder
def b(s, encoding='utf-8'):
if isinstance(s, unicode):
return s.encode(encoding, errors)
return s
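# For example, b(u'caf\xe9') yields the UTF-8 bytes 'caf\xc3\xa9' on both
# Python 2 and 3, while byte strings pass through unchanged.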
class upload_docs(upload):
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
raise DistutilsOptionError(
"no files found in upload directory '%s'"
% self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
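    # The resulting archive stores paths relative to target_dir, so built
    # docs such as index.html or _static/style.css (hypothetical names) land
    # at the zip root, which is the layout the docs upload endpoint expects.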
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
def upload_file(self, filename):
f = open(filename, 'rb')
content = f.read()
f.close()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = b(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if PY3:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b('\n--') + b(boundary)
end_boundary = sep_boundary + b('--')
body = []
for key, values in iteritems(data):
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if type(value) is tuple:
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = b(value)
body.append(sep_boundary)
body.append(b(title))
body.append(b("\n\n"))
body.append(value)
if value and value[-1:] == b('\r'):
body.append(b('\n')) # write an extra newline (lurve Macs)
body.append(end_boundary)
body.append(b("\n"))
body = b('').join(body)
self.announce("Submitting documentation to %s" % (self.repository),
log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = httplib.HTTPConnection(netloc)
elif schema == 'https':
conn = httplib.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = 'multipart/form-data; boundary=%s' % boundary
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error as e:
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
self.announce('Server response (%s): %s' % (r.status, r.reason),
log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
self.announce('Upload successful. Visit %s' % location,
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (r.status, r.reason),
log.ERROR)
if self.show_response:
print('-' * 75, r.read(), '-' * 75)
|
daira/tahoe-lafs-debian
|
refs/heads/master
|
setuptools-0.6c16dev6.egg/setuptools/command/install.py
|
7
|
import setuptools, sys, glob
from distutils.command.install import install as _install
from distutils.errors import DistutilsArgError
class install(_install):
"""Use easy_install to install the package, w/dependencies"""
user_options = _install.user_options + [
('old-and-unmanageable', None, "Try not to use this!"),
('single-version-externally-managed', None,
"used by system package builders to create 'flat' eggs"),
]
boolean_options = _install.boolean_options + [
'old-and-unmanageable', 'single-version-externally-managed',
]
new_commands = [
('install_egg_info', lambda self: True),
('install_scripts', lambda self: True),
]
_nc = dict(new_commands)
sub_commands = [
cmd for cmd in _install.sub_commands if cmd[0] not in _nc
] + new_commands
def initialize_options(self):
_install.initialize_options(self)
self.old_and_unmanageable = None
self.single_version_externally_managed = None
self.no_compile = None # make DISTUTILS_DEBUG work right!
def finalize_options(self):
_install.finalize_options(self)
if self.root:
self.single_version_externally_managed = True
elif self.single_version_externally_managed:
if not self.root and not self.record:
raise DistutilsArgError(
"You must specify --record or --root when building system"
" packages"
)
def handle_extra_path(self):
if self.root or self.single_version_externally_managed:
# explicit backward-compatibility mode, allow extra_path to work
return _install.handle_extra_path(self)
        # Ignore extra_path when installing an egg (or being run by another
        # command without --root or --single-version-externally-managed).
self.path_file = None
self.extra_dirs = ''
def run(self):
self.old_run()
if sys.platform == "win32":
from setuptools.command.scriptsetup import do_scriptsetup
do_scriptsetup()
def old_run(self):
# Explicit request for old-style install? Just do it
if self.old_and_unmanageable or self.single_version_externally_managed:
return _install.run(self)
# Attempt to detect whether we were called from setup() or by another
# command. If we were called by setup(), our caller will be the
# 'run_command' method in 'distutils.dist', and *its* caller will be
# the 'run_commands' method. If we were called any other way, our
# immediate caller *might* be 'run_command', but it won't have been
# called by 'run_commands'. This is slightly kludgy, but seems to
# work.
#
caller = sys._getframe(2)
caller_module = caller.f_globals.get('__name__','')
caller_name = caller.f_code.co_name
if caller_module != 'distutils.dist' or caller_name!='run_commands':
# We weren't called from the command line or setup(), so we
# should run in backward-compatibility mode to support bdist_*
# commands.
_install.run(self)
else:
self.do_egg_install()
def do_egg_install(self):
easy_install = self.distribution.get_command_class('easy_install')
cmd = easy_install(
self.distribution, args="x", root=self.root, record=self.record,
)
cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
cmd.always_copy_from = '.' # make sure local-dir eggs get installed
# pick up setup-dir .egg files only: no .egg-info
cmd.package_index.scan(glob.glob('*.egg'))
self.run_command('bdist_egg')
args = [self.distribution.get_command_obj('bdist_egg').egg_output]
if setuptools.bootstrap_install_from:
# Bootstrap self-installation of setuptools
args.insert(0, setuptools.bootstrap_install_from)
cmd.args = args
cmd.run()
setuptools.bootstrap_install_from = None
#
|
ReachingOut/unisubs
|
refs/heads/staging
|
apps/testhelpers/debug_toolbar_extra.py
|
5
|
from debug_toolbar.panels import DebugPanel
from haystack.backends import queries
import StringIO
import hotshot
import hotshot.stats
import os
import tempfile
import traceback
from django.conf import settings
from django.template import Template, Context
class ProfilingPanel(DebugPanel):
"""
Panel that runs the hotshot profiler during the request.
"""
name = 'Profiling'
has_content = True
def __init__(self, *args, **kwargs):
super(ProfilingPanel, self).__init__(*args, **kwargs)
self.formatted_stats = ''
self.stats = None
def nav_title(self):
"""Title showing in toolbar"""
return 'Profiling'
def nav_subtitle(self):
"""Subtitle showing until title in toolbar"""
if self.stats:
return "%d function calls in %.3f CPU seconds" % (self.stats.total_calls, self.stats.total_tt)
else:
return "self.stats evaluates to False"
def title(self):
"""Title showing in panel"""
return 'Profiling'
def url(self):
return ''
def content(self):
if self.stats and not self.formatted_stats:
try:
buffer = StringIO.StringIO()
self.stats.stream = buffer
self.stats.sort_stats('time', 'calls')
self.stats.print_stats(100)
self.formatted_stats = buffer.getvalue()
            except Exception:
                print "Error getting hotshot stats:"
                traceback.print_exc()
template = Template("""<code>{{ formatted_stats }}</code>""")
context = Context()
context.update(self.context)
context.update({
'formatted_stats': self.formatted_stats,
})
return template.render(context)
def process_view(self, request, view_func, view_args, view_kwargs):
if request.path.startswith(settings.ADMIN_MEDIA_PREFIX) or request.path.startswith('/__debug__/'):
return None
        # Profile the view callable with hotshot, writing to a temp file.
handle, filename = tempfile.mkstemp(prefix='profiling')
os.close(handle)
prof = hotshot.Profile(filename)
try:
try:
try:
result = prof.runcall(view_func, *((request,) + view_args), **view_kwargs)
finally:
prof.close()
except:
raise
else:
self.stats = hotshot.stats.load(filename)
return result
finally:
os.unlink(filename)
class HaystackDebugPanel(DebugPanel):
"""
Panel that displays the Haystack queries.
"""
name = 'Haystack'
has_content = True
def nav_title(self):
return 'Haystack Queries '
def nav_subtitle(self):
total_time = sum([float(x['time']) for x in queries])
return '%s queries in %s ' % (len(queries), total_time)
def url(self):
return ''
def title(self):
return 'Haystack Queries '
def content(self):
return "".join(["<p>%s<br><br></p>" % q for q in queries])
|
Dunkas12/BeepBoopBot
|
refs/heads/master
|
lib/youtube_dl/extractor/newstube.py
|
55
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
)
class NewstubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?newstube\.ru/media/(?P<id>.+)'
_TEST = {
'url': 'http://www.newstube.ru/media/telekanal-cnn-peremestil-gorod-slavyansk-v-krym',
'md5': '801eef0c2a9f4089fa04e4fe3533abdc',
'info_dict': {
'id': '728e0ef2-e187-4012-bac0-5a081fdcb1f6',
'ext': 'mp4',
'title': 'Телеканал CNN переместил город Славянск в Крым',
'description': 'md5:419a8c9f03442bc0b0a794d689360335',
'duration': 31.05,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage(url, video_id, 'Downloading page')
video_guid = self._html_search_regex(
r'<meta property="og:video:url" content="https?://(?:www\.)?newstube\.ru/freshplayer\.swf\?guid=(?P<guid>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})',
page, 'video GUID')
player = self._download_xml(
'http://p.newstube.ru/v2/player.asmx/GetAutoPlayInfo6?state=&url=%s&sessionId=&id=%s&placement=profile&location=n2' % (url, video_guid),
video_guid, 'Downloading player XML')
def ns(s):
return s.replace('/', '/%(ns)s') % {'ns': '{http://app1.newstube.ru/N2SiteWS/player.asmx}'}
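        # For example, ns('./Medias/MediaInfo') qualifies every path element:
        # './{http://app1.newstube.ru/N2SiteWS/player.asmx}Medias/{http://app1.newstube.ru/N2SiteWS/player.asmx}MediaInfo'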
error_message = player.find(ns('./ErrorMessage'))
if error_message is not None:
raise ExtractorError('%s returned error: %s' % (self.IE_NAME, error_message.text), expected=True)
session_id = player.find(ns('./SessionId')).text
media_info = player.find(ns('./Medias/MediaInfo'))
title = media_info.find(ns('./Name')).text
description = self._og_search_description(page)
thumbnail = media_info.find(ns('./KeyFrame')).text
duration = int(media_info.find(ns('./Duration')).text) / 1000.0
formats = []
for stream_info in media_info.findall(ns('./Streams/StreamInfo')):
media_location = stream_info.find(ns('./MediaLocation'))
if media_location is None:
continue
server = media_location.find(ns('./Server')).text
app = media_location.find(ns('./App')).text
media_id = stream_info.find(ns('./Id')).text
name = stream_info.find(ns('./Name')).text
width = int(stream_info.find(ns('./Width')).text)
height = int(stream_info.find(ns('./Height')).text)
formats.append({
'url': 'rtmp://%s/%s' % (server, app),
'app': app,
'play_path': '01/%s' % video_guid.upper(),
'rtmp_conn': ['S:%s' % session_id, 'S:%s' % media_id, 'S:n2'],
'page_url': url,
'ext': 'flv',
'format_id': 'rtmp' + ('-%s' % name if name else ''),
'width': width,
'height': height,
})
sources_data = self._download_json(
'http://www.newstube.ru/player2/getsources?guid=%s' % video_guid,
video_guid, fatal=False)
if sources_data:
for source in sources_data.get('Sources', []):
source_url = source.get('Src')
if not source_url:
continue
height = int_or_none(source.get('Height'))
f = {
'format_id': 'http' + ('-%dp' % height if height else ''),
'url': source_url,
'width': int_or_none(source.get('Width')),
'height': height,
}
source_type = source.get('Type')
if source_type:
mobj = re.search(r'codecs="([^,]+),\s*([^"]+)"', source_type)
if mobj:
vcodec, acodec = mobj.groups()
f.update({
'vcodec': vcodec,
'acodec': acodec,
})
formats.append(f)
self._check_formats(formats, video_guid)
self._sort_formats(formats)
return {
'id': video_guid,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
|
ejona86/grpc
|
refs/heads/master
|
tools/release/verify_python_release.py
|
8
|
#!/usr/bin/env python3
# Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verifies that all gRPC Python artifacts have been successfully published.
This script is intended to be run from a directory containing the artifacts
that have been uploaded and only the artifacts that have been uploaded. We use
PyPI's JSON API to verify that the proper filenames and checksums are present.
Note that PyPI may take several minutes to update its metadata. Don't have a
heart attack immediately.
This sanity check is a good first step, but ideally, we would automate the
entire release process.
"""
import argparse
import collections
import hashlib
import os
import requests
import sys
_DEFAULT_PACKAGES = [
"grpcio",
"grpcio-tools",
"grpcio-status",
"grpcio-health-checking",
"grpcio-reflection",
"grpcio-channelz",
"grpcio-testing",
]
Artifact = collections.namedtuple("Artifact", ("filename", "checksum"))
def _get_md5_checksum(filename):
"""Calculate the md5sum for a file."""
hash_md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def _get_local_artifacts():
"""Get a set of artifacts representing all files in the cwd."""
return set(
Artifact(f, _get_md5_checksum(f)) for f in os.listdir(os.getcwd()))
def _get_remote_artifacts_for_package(package, version):
"""Get a list of artifacts based on PyPi's json metadata.
    Note that this data will not be updated immediately after upload. In my
experience, it has taken a minute on average to be fresh.
"""
artifacts = set()
payload = requests.get("https://pypi.org/pypi/{}/{}/json".format(
package, version)).json()
for download_info in payload['releases'][version]:
artifacts.add(
Artifact(download_info['filename'], download_info['md5_digest']))
return artifacts
def _get_remote_artifacts_for_packages(packages, version):
artifacts = set()
for package in packages:
artifacts |= _get_remote_artifacts_for_package(package, version)
return artifacts
def _verify_release(version, packages):
"""Compare the local artifacts to the packages uploaded to PyPI."""
local_artifacts = _get_local_artifacts()
remote_artifacts = _get_remote_artifacts_for_packages(packages, version)
if local_artifacts != remote_artifacts:
local_but_not_remote = local_artifacts - remote_artifacts
remote_but_not_local = remote_artifacts - local_artifacts
if local_but_not_remote:
print("The following artifacts exist locally but not remotely.")
for artifact in local_but_not_remote:
print(artifact)
if remote_but_not_local:
print("The following artifacts exist remotely but not locally.")
for artifact in remote_but_not_local:
print(artifact)
sys.exit(1)
print("Release verified successfully.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Verify a release. Run this from a directory containing "
        "only the artifacts to be uploaded. Note that PyPI may take several "
        "minutes after the upload to reflect the proper metadata.")
parser.add_argument("version")
parser.add_argument("packages",
nargs='*',
type=str,
default=_DEFAULT_PACKAGES)
args = parser.parse_args()
_verify_release(args.version, args.packages)
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/completion/parameterFromUsages.after.py
|
83
|
def foo(bar):
bar.append()
def baz():
foo(['hello', 'world'])
|
hassaanaliw/flaskbook
|
refs/heads/master
|
app/user/__init__.py
|
2
|
__author__ = 'hassaanali'
|
kerr-huang/SL4A
|
refs/heads/master
|
python/src/Lib/email/header.py
|
54
|
# Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
"""Header encoding and decoding functionality."""
__all__ = [
'Header',
'decode_header',
'make_header',
]
import re
import binascii
import email.quoprimime
import email.base64mime
from email.errors import HeaderParseError
from email.charset import Charset
NL = '\n'
SPACE = ' '
USPACE = u' '
SPACE8 = ' ' * 8
UEMPTYSTRING = u''
MAXLINELEN = 76
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')
# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
(?=[ \t]|$) # whitespace or the end of the string
''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822. Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')
# Helpers
_max_append = email.quoprimime._max_append
def decode_header(header):
"""Decode a message header value without converting charset.
Returns a list of (decoded_string, charset) pairs containing each of the
decoded parts of the header. Charset is None for non-encoded parts of the
header, otherwise a lower-case string containing the name of the character
set specified in the encoded string.
    An email.Errors.HeaderParseError may be raised when certain decoding
    errors occur (e.g. a base64 decoding exception).
"""
# If no encoding, just return the header
header = str(header)
if not ecre.search(header):
return [(header, None)]
decoded = []
dec = ''
for line in header.splitlines():
# This line might not have an encoding in it
if not ecre.search(line):
decoded.append((line, None))
continue
parts = ecre.split(line)
while parts:
unenc = parts.pop(0).strip()
if unenc:
# Should we continue a long line?
if decoded and decoded[-1][1] is None:
decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
else:
decoded.append((unenc, None))
if parts:
charset, encoding = [s.lower() for s in parts[0:2]]
encoded = parts[2]
dec = None
if encoding == 'q':
dec = email.quoprimime.header_decode(encoded)
elif encoding == 'b':
try:
dec = email.base64mime.decode(encoded)
except binascii.Error:
# Turn this into a higher level exception. BAW: Right
# now we throw the lower level exception away but
# when/if we get exception chaining, we'll preserve it.
raise HeaderParseError
if dec is None:
dec = encoded
if decoded and decoded[-1][1] == charset:
decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
else:
decoded.append((dec, charset))
del parts[0:3]
return decoded
def make_header(decoded_seq, maxlinelen=None, header_name=None,
continuation_ws=' '):
"""Create a Header from a sequence of pairs as returned by decode_header()
decode_header() takes a header value string and returns a sequence of
pairs of the format (decoded_string, charset) where charset is the string
name of the character set.
This function takes one of those sequence of pairs and returns a Header
instance. Optional maxlinelen, header_name, and continuation_ws are as in
the Header constructor.
"""
h = Header(maxlinelen=maxlinelen, header_name=header_name,
continuation_ws=continuation_ws)
for s, charset in decoded_seq:
# None means us-ascii but we can simply pass it on to h.append()
if charset is not None and not isinstance(charset, Charset):
charset = Charset(charset)
h.append(s, charset)
return h
class Header:
def __init__(self, s=None, charset=None,
maxlinelen=None, header_name=None,
continuation_ws=' ', errors='strict'):
"""Create a MIME-compliant header that can contain many character sets.
Optional s is the initial header value. If None, the initial header
value is not set. You can later append to the header with .append()
method calls. s may be a byte string or a Unicode string, but see the
.append() documentation for semantics.
Optional charset serves two purposes: it has the same meaning as the
charset argument to the .append() method. It also sets the default
character set for all subsequent .append() calls that omit the charset
argument. If charset is not provided in the constructor, the us-ascii
charset is used both as s's initial charset and as the default for
subsequent .append() calls.
        The maximum line length can be specified explicitly via maxlinelen. For
splitting the first line to a shorter value (to account for the field
header which isn't included in s, e.g. `Subject') pass in the name of
the field in header_name. The default maxlinelen is 76.
continuation_ws must be RFC 2822 compliant folding whitespace (usually
either a space or a hard tab) which will be prepended to continuation
lines.
errors is passed through to the .append() call.
"""
if charset is None:
charset = USASCII
if not isinstance(charset, Charset):
charset = Charset(charset)
self._charset = charset
self._continuation_ws = continuation_ws
cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
# BAW: I believe `chunks' and `maxlinelen' should be non-public.
self._chunks = []
if s is not None:
self.append(s, charset, errors)
if maxlinelen is None:
maxlinelen = MAXLINELEN
if header_name is None:
# We don't know anything about the field header so the first line
# is the same length as subsequent lines.
self._firstlinelen = maxlinelen
else:
# The first line should be shorter to take into account the field
# header. Also subtract off 2 extra for the colon and space.
self._firstlinelen = maxlinelen - len(header_name) - 2
# Second and subsequent lines should subtract off the length in
# columns of the continuation whitespace prefix.
self._maxlinelen = maxlinelen - cws_expanded_len
def __str__(self):
"""A synonym for self.encode()."""
return self.encode()
def __unicode__(self):
"""Helper for the built-in unicode function."""
uchunks = []
lastcs = None
for s, charset in self._chunks:
# We must preserve spaces between encoded and non-encoded word
# boundaries, which means for us we need to add a space when we go
# from a charset to None/us-ascii, or from None/us-ascii to a
# charset. Only do this for the second and subsequent chunks.
nextcs = charset
if uchunks:
if lastcs not in (None, 'us-ascii'):
if nextcs in (None, 'us-ascii'):
uchunks.append(USPACE)
nextcs = None
elif nextcs not in (None, 'us-ascii'):
uchunks.append(USPACE)
lastcs = nextcs
uchunks.append(unicode(s, str(charset)))
return UEMPTYSTRING.join(uchunks)
# Rich comparison operators for equality only. BAW: does it make sense to
# have or explicitly disable <, <=, >, >= operators?
def __eq__(self, other):
# other may be a Header or a string. Both are fine so coerce
# ourselves to a string, swap the args and do another comparison.
return other == self.encode()
def __ne__(self, other):
return not self == other
def append(self, s, charset=None, errors='strict'):
"""Append a string to the MIME header.
Optional charset, if given, should be a Charset instance or the name
of a character set (which will be converted to a Charset instance). A
value of None (the default) means that the charset given in the
constructor is used.
s may be a byte string or a Unicode string. If it is a byte string
(i.e. isinstance(s, str) is true), then charset is the encoding of
that byte string, and a UnicodeError will be raised if the string
cannot be decoded with that charset. If s is a Unicode string, then
charset is a hint specifying the character set of the characters in
the string. In this case, when producing an RFC 2822 compliant header
using RFC 2047 rules, the Unicode string will be encoded using the
following charsets in order: us-ascii, the charset hint, utf-8. The
first character set not to provoke a UnicodeError is used.
Optional `errors' is passed as the third argument to any unicode() or
ustr.encode() call.
"""
if charset is None:
charset = self._charset
elif not isinstance(charset, Charset):
charset = Charset(charset)
# If the charset is our faux 8bit charset, leave the string unchanged
if charset != '8bit':
# We need to test that the string can be converted to unicode and
# back to a byte string, given the input and output codecs of the
# charset.
if isinstance(s, str):
# Possibly raise UnicodeError if the byte string can't be
# converted to a unicode with the input codec of the charset.
incodec = charset.input_codec or 'us-ascii'
ustr = unicode(s, incodec, errors)
# Now make sure that the unicode could be converted back to a
# byte string with the output codec, which may be different
                # than the input codec. Still, use the original byte string.
outcodec = charset.output_codec or 'us-ascii'
ustr.encode(outcodec, errors)
elif isinstance(s, unicode):
# Now we have to be sure the unicode string can be converted
# to a byte string with a reasonable output codec. We want to
# use the byte string in the chunk.
for charset in USASCII, charset, UTF8:
try:
outcodec = charset.output_codec or 'us-ascii'
s = s.encode(outcodec, errors)
break
except UnicodeError:
pass
else:
assert False, 'utf-8 conversion failed'
self._chunks.append((s, charset))
def _split(self, s, charset, maxlinelen, splitchars):
# Split up a header safely for use with encode_chunks.
splittable = charset.to_splittable(s)
encoded = charset.from_splittable(splittable, True)
elen = charset.encoded_header_len(encoded)
        # If the line's encoded length fits, just return it
if elen <= maxlinelen:
return [(encoded, charset)]
# If we have undetermined raw 8bit characters sitting in a byte
# string, we really don't know what the right thing to do is. We
# can't really split it because it might be multibyte data which we
# could break if we split it between pairs. The least harm seems to
# be to not split the header at all, but that means they could go out
# longer than maxlinelen.
if charset == '8bit':
return [(s, charset)]
# BAW: I'm not sure what the right test here is. What we're trying to
# do is be faithful to RFC 2822's recommendation that ($2.2.3):
#
# "Note: Though structured field bodies are defined in such a way that
# folding can take place between many of the lexical tokens (and even
# within some of the lexical tokens), folding SHOULD be limited to
# placing the CRLF at higher-level syntactic breaks."
#
# For now, I can only imagine doing this when the charset is us-ascii,
# although it's possible that other charsets may also benefit from the
# higher-level syntactic breaks.
elif charset == 'us-ascii':
return self._split_ascii(s, charset, maxlinelen, splitchars)
# BAW: should we use encoded?
elif elen == len(s):
# We can split on _maxlinelen boundaries because we know that the
# encoding won't change the size of the string
splitpnt = maxlinelen
first = charset.from_splittable(splittable[:splitpnt], False)
last = charset.from_splittable(splittable[splitpnt:], False)
else:
# Binary search for split point
first, last = _binsplit(splittable, charset, maxlinelen)
# first is of the proper length so just wrap it in the appropriate
# chrome. last must be recursively split.
fsplittable = charset.to_splittable(first)
fencoded = charset.from_splittable(fsplittable, True)
chunk = [(fencoded, charset)]
return chunk + self._split(last, charset, self._maxlinelen, splitchars)
def _split_ascii(self, s, charset, firstlen, splitchars):
chunks = _split_ascii(s, firstlen, self._maxlinelen,
self._continuation_ws, splitchars)
return zip(chunks, [charset]*len(chunks))
def _encode_chunks(self, newchunks, maxlinelen):
# MIME-encode a header with many different charsets and/or encodings.
#
# Given a list of pairs (string, charset), return a MIME-encoded
# string suitable for use in a header field. Each pair may have
# different charsets and/or encodings, and the resulting header will
# accurately reflect each setting.
#
# Each encoding can be email.Utils.QP (quoted-printable, for
# ASCII-like character sets like iso-8859-1), email.Utils.BASE64
# (Base64, for non-ASCII like character sets like KOI8-R and
# iso-2022-jp), or None (no encoding).
#
# Each pair will be represented on a separate line; the resulting
# string will be in the format:
#
# =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
# =?charset2?b?SvxyZ2VuIEL2aW5n?="
chunks = []
for header, charset in newchunks:
if not header:
continue
if charset is None or charset.header_encoding is None:
s = header
else:
s = charset.header_encode(header)
# Don't add more folding whitespace than necessary
if chunks and chunks[-1].endswith(' '):
extra = ''
else:
extra = ' '
_max_append(chunks, s, maxlinelen, extra)
joiner = NL + self._continuation_ws
return joiner.join(chunks)
def encode(self, splitchars=';, '):
"""Encode a message header into an RFC-compliant format.
There are many issues involved in converting a given string for use in
an email header. Only certain character sets are readable in most
email clients, and as header strings can only contain a subset of
7-bit ASCII, care must be taken to properly convert and encode (with
Base64 or quoted-printable) header strings. In addition, there is a
75-character length limit on any given encoded header field, so
line-wrapping must be performed, even with double-byte character sets.
This method will do its best to convert the string to the correct
character set used in email, and encode and line wrap it safely with
the appropriate scheme for that character set.
If the given charset is not known or an error occurs during
conversion, this function will return the header untouched.
Optional splitchars is a string containing characters to split long
ASCII lines on, in rough support of RFC 2822's `highest level
syntactic breaks'. This doesn't affect RFC 2047 encoded lines.
"""
newchunks = []
maxlinelen = self._firstlinelen
lastlen = 0
for s, charset in self._chunks:
# The first bit of the next chunk should be just long enough to
# fill the next line. Don't forget the space separating the
# encoded words.
targetlen = maxlinelen - lastlen - 1
if targetlen < charset.encoded_header_len(''):
# Stick it on the next line
targetlen = maxlinelen
newchunks += self._split(s, charset, targetlen, splitchars)
lastchunk, lastcharset = newchunks[-1]
lastlen = lastcharset.encoded_header_len(lastchunk)
return self._encode_chunks(newchunks, maxlinelen)
def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
lines = []
maxlen = firstlen
for line in s.splitlines():
# Ignore any leading whitespace (i.e. continuation whitespace) already
# on the line, since we'll be adding our own.
line = line.lstrip()
if len(line) < maxlen:
lines.append(line)
maxlen = restlen
continue
# Attempt to split the line at the highest-level syntactic break
# possible. Note that we don't have a lot of smarts about field
# syntax; we just try to break on semi-colons, then commas, then
# whitespace.
for ch in splitchars:
if ch in line:
break
else:
# There's nothing useful to split the line on, not even spaces, so
# just append this line unchanged
lines.append(line)
maxlen = restlen
continue
# Now split the line on the character plus trailing whitespace
cre = re.compile(r'%s\s*' % ch)
if ch in ';,':
eol = ch
else:
eol = ''
joiner = eol + ' '
joinlen = len(joiner)
wslen = len(continuation_ws.replace('\t', SPACE8))
this = []
linelen = 0
for part in cre.split(line):
curlen = linelen + max(0, len(this)-1) * joinlen
partlen = len(part)
onfirstline = not lines
# We don't want to split after the field name, if we're on the
# first line and the field name is present in the header string.
if ch == ' ' and onfirstline and \
len(this) == 1 and fcre.match(this[0]):
this.append(part)
linelen += partlen
elif curlen + partlen > maxlen:
if this:
lines.append(joiner.join(this) + eol)
# If this part is longer than maxlen and we aren't already
# splitting on whitespace, try to recursively split this line
# on whitespace.
if partlen > maxlen and ch != ' ':
subl = _split_ascii(part, maxlen, restlen,
continuation_ws, ' ')
lines.extend(subl[:-1])
this = [subl[-1]]
else:
this = [part]
linelen = wslen + len(this[-1])
maxlen = restlen
else:
this.append(part)
linelen += partlen
# Put any left over parts on a line by themselves
if this:
lines.append(joiner.join(this))
return lines
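# Example of the split order implemented above (derived from the loop over
# splitchars): with the default splitchars ';, ', a value such as
#   'text/plain; charset=us-ascii; format=flowed'
# is broken at the semicolons (the first split character present); commas and
# spaces are only consulted when no higher-level break character occurs.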
def _binsplit(splittable, charset, maxlinelen):
i = 0
j = len(splittable)
while i < j:
# Invariants:
# 1. splittable[:k] fits for all k <= i (note that we *assume*,
# at the start, that splittable[:0] fits).
# 2. splittable[:k] does not fit for any k > j (at the start,
# this means we shouldn't look at any k > len(splittable)).
# 3. We don't know about splittable[:k] for k in i+1..j.
# 4. We want to set i to the largest k that fits, with i <= k <= j.
#
m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j
chunk = charset.from_splittable(splittable[:m], True)
chunklen = charset.encoded_header_len(chunk)
if chunklen <= maxlinelen:
# m is acceptable, so is a new lower bound.
i = m
else:
# m is not acceptable, so final i must be < m.
j = m - 1
# i == j. Invariant #1 implies that splittable[:i] fits, and
# invariant #2 implies that splittable[:i+1] does not fit, so i
# is what we're looking for.
first = charset.from_splittable(splittable[:i], False)
last = charset.from_splittable(splittable[i:], False)
return first, last
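# Sketch of the bisection above with a toy "fits" predicate (illustrative
# only; the real check goes through charset.encoded_header_len):
#
#   splittable of length 8, where 5 encoded chars fit but 6 do not:
#   i=0, j=8 -> m=4 (fits, i=4) -> m=6 (too long, j=5) -> m=5 (fits, i=5)
#   -> i == j == 5, so first gets 5 characters and last gets the rest.
#
# Each probe halves the i..j window, so the loop runs O(log n) times.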
|
SmingHub/Sming
|
refs/heads/develop
|
Sming/Libraries/RF24/tests/pingpair_test/runtest.py
|
9
|
#!/opt/local/bin/python
import sys,serial
def read_until(token):
while 1:
line = ser.readline(None,"\r")
sys.stdout.write(line)
if (line.startswith(token)):
break
return line
ser = serial.Serial(sys.argv[1], 57600, timeout=5, dsrdtr=False, rtscts=False)
read_until("+READY")
ser.write(sys.argv[2])
line = read_until("+OK")
ser.close()
if (line.find("PASS") != -1):
sys.exit(0)
else:
sys.exit(1)
|
anetasie/sherpa
|
refs/heads/master
|
sherpa/optmethods/tests/test_ncoresopt.py
|
1
|
#
# Copyright (C) 2019, 2020 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy as np
import pytest
from sherpa.optmethods import _tstoptfct
from sherpa.optmethods.ncoresnm import ncoresNelderMead
from sherpa.optmethods.ncoresde import ncoresDifEvo
from sherpa.utils import _ncpus
def init(name, npar):
x0, xmin, xmax, fmin = _tstoptfct.init(name, npar)
return x0, xmin, xmax, fmin
def tst_opt(opt, fcn, npar, reltol=1.0e-3, abstol=1.0e-3):
def func(arg):
return fcn(arg)[0]
x0, xmin, xmax, fmin = init(fcn.__name__, npar)
nfev, fval, par = opt(func, x0, xmin, xmax)
assert fmin == pytest.approx(fval, rel=reltol, abs=abstol)
NCORES_NM = ncoresNelderMead()
NCORES_DE = ncoresDifEvo()
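# Minimal sketch (underscore-prefixed so pytest does not collect it) of
# calling an ncores optimizer directly; the quadratic objective here is an
# illustrative assumption, but the (nfev, fval, par) return signature
# mirrors its use in tst_opt above.
def _demo_quadratic(opt=NCORES_NM):
    def quad(x):
        return float(np.sum((np.asarray(x) - 3.0) ** 2))
    nfev, fval, par = opt(quad, [0.0, 0.0], [-10.0, -10.0], [10.0, 10.0])
    return nfev, fval, par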
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Ackley(opt, npar=4):
tst_opt(opt, _tstoptfct.Ackley, npar)
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Bohachevsky1(opt, npar=2):
tst_opt(opt, _tstoptfct.Bohachevsky1, npar)
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Bohachevsky2(opt, npar=2):
tst_opt(opt, _tstoptfct.Bohachevsky2, npar)
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Bohachevsky3(opt, npar=2):
tst_opt(opt, _tstoptfct.Bohachevsky3, npar)
@pytest.mark.slow
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Booth(opt, npar=6):
tst_opt(opt, _tstoptfct.Booth, npar)
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Branin(opt, npar=2):
tst_opt(opt, _tstoptfct.Branin, npar)
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Branin2(opt, npar=2):
tst_opt(opt, _tstoptfct.Branin2, npar)
@pytest.mark.slow
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Colville(opt, npar=4):
tst_opt(opt, _tstoptfct.Colville, npar)
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_decanom(opt, npar=2):
tst_opt(opt, _tstoptfct.decanom, npar)
@pytest.mark.slow
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_dodecal(opt, npar=3):
tst_opt(opt, _tstoptfct.dodecal, npar)
@pytest.mark.slow
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_DixonPrice(opt, npar=5):
tst_opt(opt, _tstoptfct.DixonPrice, npar)
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Hansen(opt, npar=2):
tst_opt(opt, _tstoptfct.Hansen, npar)
@pytest.mark.slow
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Holzman2(opt, npar=3):
tst_opt(opt, _tstoptfct.Holzman2, npar)
@pytest.mark.slow
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Judge(opt, npar=3):
tst_opt(opt, _tstoptfct.Judge, npar)
@pytest.mark.parametrize("opt", [NCORES_NM])
def test_McCormick(opt, npar=2):
tst_opt(opt, _tstoptfct.McCormick, npar)
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Michalewicz(opt, npar=2):
tst_opt(opt, _tstoptfct.Michalewicz, npar)
@pytest.mark.parametrize("opt", [NCORES_NM])
def test_Paviani(opt, npar=10):
tst_opt(opt, _tstoptfct.Paviani, npar)
@pytest.mark.slow
def test_Paviani_de(npar=10):
tst_opt(NCORES_DE, _tstoptfct.Paviani, npar)
@pytest.mark.parametrize("opt", [NCORES_NM])
def test_Rastrigin(opt, npar=4):
tst_opt(opt, _tstoptfct.Rastrigin, npar)
@pytest.mark.slow
def test_Rastrigin_de(npar=4):
tst_opt(NCORES_DE, _tstoptfct.Rastrigin, npar)
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_seqp(opt, npar=2):
tst_opt(opt, _tstoptfct.seqp, npar)
@pytest.mark.slow
def test_seqp_de(npar=2):
tst_opt(NCORES_DE, _tstoptfct.seqp, npar)
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Shubert(opt, npar=2):
tst_opt(opt, _tstoptfct.Shubert, npar)
@pytest.mark.parametrize("opt", [NCORES_NM, NCORES_DE])
def test_Trecanni(opt, npar=2):
tst_opt(opt, _tstoptfct.Trecanni, npar)
|
hossamkhader/ns-3
|
refs/heads/master
|
examples/stats/examples-to-run.py
|
199
|
#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# A list of C++ examples to run in order to ensure that they remain
# buildable and runnable over time. Each tuple in the list contains
#
# (example_name, do_run, do_valgrind_run).
#
# See test.py for more information.
cpp_examples = [
("wifi-example-sim", "True", "True"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time. Each tuple in the list contains
#
# (example_name, do_run).
#
# See test.py for more information.
python_examples = []
|
WmHHooper/aima-python
|
refs/heads/master
|
submissions/Zemgulys/myCSPs.py
|
1
|
import csp
rgby = ['R', 'G', 'B', 'Y']
d2 = {'U': rgby,
'P': rgby,
'V': rgby,
'A': rgby,
'K': rgby,
'S': rgby,
'T': rgby,
'M': rgby,
'TG': rgby,
'KL': rgby}
v2 = list(d2.keys())
Lithuania = {'U': ['V', 'P'],
'P': ['U', 'V', 'K', 'S'],
'V': ['U', 'P', 'K', 'A'],
'A': ['V', 'K', 'M'],
'K': ['A', 'M', 'TG', 'S', 'P', 'V'],
'S': ['P', 'K', 'TG', 'T'],
'T': ['S', 'TG', 'KL'],
'M': ['A', 'K', 'TG'],
'TG': ['M', 'K', 'S', 'T', 'KL'],
'KL': ['TG', 'T']}
def constraints(A, a, B, b):
    if A == B:  # a variable compared with itself, e.g. U == U
        return True
    if a == b:  # neighboring regions assigned the same color, e.g. K = G and S = G
        return False
return True
c2 = csp.CSP(v2, d2, Lithuania, constraints)
c2.label = 'Lithuania Map'
myCSPs = [
{
'csp' : c2,
# 'select_unassigned_variable': csp.mrv,
# 'order_domain_values': csp.lcv,
# 'inference': csp.mac,
# 'inference': csp.forward_checking,
},
{
'csp' : c2,
'select_unassigned_variable': csp.mrv,
# 'order_domain_values': csp.lcv,
# 'inference': csp.mac,
# 'inference': csp.forward_checking,
},
{
'csp' : c2,
# 'select_unassigned_variable': csp.mrv,
'order_domain_values': csp.lcv,
# 'inference': csp.mac,
# 'inference': csp.forward_checking,
},
{
'csp' : c2,
# 'select_unassigned_variable': csp.mrv,
# 'order_domain_values': csp.lcv,
'inference': csp.mac,
# 'inference': csp.forward_checking,
},
{
'csp' : c2,
# 'select_unassigned_variable': csp.mrv,
# 'order_domain_values': csp.lcv,
# 'inference': csp.mac,
'inference': csp.forward_checking,
},
{
'csp' : c2,
'select_unassigned_variable': csp.mrv,
'order_domain_values': csp.lcv,
'inference': csp.mac,
# 'inference': csp.forward_checking,
},
]
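# Hedged usage sketch: each dict above mirrors the keyword arguments of
# aima-python's csp.backtracking_search (an assumption based on the commented
# option names). Guarded so importing this module stays side-effect free.
if __name__ == '__main__':
    for config in myCSPs:
        kwargs = {k: v for k, v in config.items() if k != 'csp'}
        print(config['csp'].label, '->', csp.backtracking_search(config['csp'], **kwargs))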
|
sunlianqiang/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/lib2to3/fixes/fix_itertools.py
|
136
|
""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
imports from itertools are fixed in fix_itertools_import.py
If itertools is imported as something else (e.g. import itertools as it;
it.izip(spam, eggs)) method calls will not get fixed.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixItertools(fixer_base.BaseFix):
BM_compatible = True
it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
PATTERN = """
power< it='itertools'
trailer<
dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
|
power< func=%(it_funcs)s trailer< '(' [any] ')' > >
""" %(locals())
# Needs to be run after fix_(map|zip|filter)
run_order = 6
def transform(self, node, results):
prefix = None
func = results['func'][0]
if ('it' in results and
func.value not in ('ifilterfalse', 'izip_longest')):
dot, it = (results['dot'], results['it'])
# Remove the 'itertools'
prefix = it.prefix
it.remove()
# Replace the node which contains ('.', 'function') with the
# function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
prefix = prefix or func.prefix
func.replace(Name(func.value[1:], prefix=prefix))
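# Illustrative before/after (derived from the docstring and transform above):
#
#   itertools.imap(f, xs)          -->  map(f, xs)
#   itertools.izip(a, b)           -->  zip(a, b)
#   itertools.ifilterfalse(p, xs)  -->  itertools.filterfalse(p, xs)
#
# The 'itertools.' prefix is dropped only for imap/ifilter/izip;
# ifilterfalse and izip_longest keep the prefix and just lose the leading 'i'.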
|
xiandiancloud/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/empty_asset_trashcan.py
|
206
|
from django.core.management.base import BaseCommand, CommandError
from xmodule.contentstore.utils import empty_asset_trashcan
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
from .prompt import query_yes_no
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class Command(BaseCommand):
help = '''Empty the trashcan. Can pass an optional course_id to limit the damage.'''
def handle(self, *args, **options):
if len(args) != 1 and len(args) != 0:
raise CommandError("empty_asset_trashcan requires one or no arguments: |<course_id>|")
if len(args) == 1:
try:
course_key = CourseKey.from_string(args[0])
except InvalidKeyError:
course_key = SlashSeparatedCourseKey.from_deprecated_string(args[0])
course_ids = [course_key]
else:
course_ids = [course.id for course in modulestore().get_courses()]
if query_yes_no("Emptying trashcan. Confirm?", default="no"):
empty_asset_trashcan(course_ids)
|
gicking/STM8_templates
|
refs/heads/master
|
Projects/muBoard_Examples/LCD-display_I2C/build_upload.py
|
13
|
#!/usr/bin/python
'''
Script for building and uploading a STM8 project with dependency auto-detection
'''
# set general options
UPLOAD = 'BSL' # select 'BSL' or 'SWIM'
TERMINAL = False # set True to open terminal after upload
RESET = 1 # STM8 reset: 0=skip, 1=manual, 2=DTR line (RS232), 3=send 'Re5eT!' @ 115.2kBaud, 4=Arduino pin 8, 5=Raspi pin 12
OPTIONS = '' # e.g. device for SPL ('-DSTM8S105', see stm8s.h)
# set path to root of STM8 templates
ROOT_DIR = '../../../'
LIB_ROOT = ROOT_DIR + 'Library/'
TOOL_DIR = ROOT_DIR + 'Tools/'
OBJDIR = 'output'
TARGET = 'main.ihx'
# set OS specific
import platform
if platform.system() == 'Windows':
PORT = 'COM10'
SWIM_PATH = 'C:/Programme/STMicroelectronics/st_toolset/stvp/'
SWIM_TOOL = 'ST-LINK'
SWIM_NAME = 'STM8S105x6' # STM8 Discovery
#SWIM_NAME = 'STM8S208xB' # muBoard
MAKE_TOOL = 'mingw32-make.exe'
else:
PORT = '/dev/ttyUSB0'
SWIM_TOOL = 'stlink'
SWIM_NAME = 'stm8s105c6' # STM8 Discovery
#SWIM_NAME = 'stm8s208?b' # muBoard
MAKE_TOOL = 'make'
# import required modules
import sys
import os
import argparse
sys.path.insert(0,TOOL_DIR) # assert that TOOL_DIR is searched first
import misc
from buildProject import createMakefile, buildProject
from uploadHex import stm8gal, stm8flash, STVP
##################
# main program
##################
# commandline parameters with defaults
parser = argparse.ArgumentParser(description="compile and upload STM8 project")
parser.add_argument("--skipmakefile", default=False, action="store_true" , help="skip creating Makefile")
parser.add_argument("--skipbuild", default=False, action="store_true" , help="skip building project")
parser.add_argument("--skipupload", default=False, action="store_true" , help="skip uploading hexfile")
parser.add_argument("--skipterminal", default=False, action="store_true" , help="skip opening terminal")
parser.add_argument("--skippause", default=False, action="store_true" , help="skip pause before exit")
args = parser.parse_args()
# create Makefile
if args.skipmakefile == False:
createMakefile(workdir='.', libroot=LIB_ROOT, outdir=OBJDIR, target=TARGET, options=OPTIONS)
# build target
if args.skipbuild == False:
buildProject(workdir='.', make=MAKE_TOOL)
# upload code via UART bootloader
if args.skipupload == False:
if UPLOAD == 'BSL':
stm8gal(tooldir=TOOL_DIR, port=PORT, outdir=OBJDIR, target=TARGET, reset=RESET)
# upload code via SWIM. Use stm8flash on Linux, STVP on Windows (due to libusb issues)
if UPLOAD == 'SWIM':
if platform.system() == 'Windows':
STVP(tooldir=SWIM_PATH, device=SWIM_NAME, hardware=SWIM_TOOL, outdir=OBJDIR, target=TARGET)
else:
stm8flash(tooldir=TOOL_DIR, device=SWIM_NAME, hardware=SWIM_TOOL, outdir=OBJDIR, target=TARGET)
# if specified open serial console after upload
if args.skipterminal == False:
if TERMINAL == True:
cmd = 'python '+TOOL_DIR+'terminal.py -p '+PORT
exitcode = os.system(cmd)
if (exitcode != 0):
sys.stderr.write('error '+str(exitcode)+'\n\n')
misc.Exit(exitcode)
# wait for return, then close window
if args.skippause == False:
if (sys.version_info.major == 3):
input("\npress return to exit ... ")
else:
raw_input("\npress return to exit ... ")
sys.stdout.write('\n\n')
# END OF MODULE
|
jaredpar/cli
|
refs/heads/rel/1.0.0
|
packaging/debian/package_tool/scripts/config_template_generator.py
|
1
|
#!/usr/bin/python
#
# Copyright (c) .NET Foundation and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
#
# Parses debian_config.json and generates appropriate templates
# Where optional defaults exist, they are defined in the template_dict
# of the appropriate generation function
import os
import sys
import json
import datetime
FILE_CHANGELOG = 'changelog'
FILE_CONTROL = 'control'
FILE_COPYRIGHT = 'copyright'
FILE_SYMLINK_FORMAT = '{package_name}.links'
PACKAGE_ROOT_FORMAT = "usr/share/{package_name}"
CHANGELOG_DATE_FORMAT = "%a, %d %b %Y %H:%M:%S %z"
# UTC Timezone for Changelog date
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
# Generation Functions
def generate_and_write_all(config_data, template_dir, output_dir, package_version=None):
try:
changelog_contents = generate_changelog(config_data, template_dir, package_version=package_version)
control_contents = generate_control(config_data, template_dir)
copyright_contents = generate_copyright(config_data, template_dir)
symlink_contents = generate_symlinks(config_data)
except Exception as exc:
print exc
help_and_exit("Error: Generation Failed, check your config file.")
write_file(changelog_contents, output_dir, FILE_CHANGELOG)
write_file(control_contents, output_dir, FILE_CONTROL)
write_file(copyright_contents, output_dir, FILE_COPYRIGHT)
# Symlink File is optional
if symlink_contents:
symlink_filename = get_symlink_filename(config_data)
write_file(symlink_contents, output_dir, symlink_filename)
return
def generate_changelog(config_data, template_dir, package_version=None):
template = get_template(template_dir, FILE_CHANGELOG)
release_data = config_data["release"]
# Allow for Version Override
config_package_version = release_data["package_version"]
if package_version is None:
package_version = config_package_version
template_dict = dict(\
PACKAGE_VERSION=package_version,
PACKAGE_REVISION=release_data["package_revision"],
CHANGELOG_MESSAGE=release_data["changelog_message"],
URGENCY=release_data.get("urgency", "low"),
PACKAGE_NAME=config_data["package_name"],
MAINTAINER_NAME=config_data["maintainer_name"],
MAINTAINER_EMAIL=config_data["maintainer_email"],
DATE=datetime.datetime.now(UTC()).strftime(CHANGELOG_DATE_FORMAT)
)
contents = template.format(**template_dict)
return contents
def generate_control(config_data, template_dir):
template = get_template(template_dir, FILE_CONTROL)
dependency_data = config_data.get("debian_dependencies", None)
    dependency_str = get_dependent_packages_string(dependency_data)
conflict_data = config_data.get("package_conflicts", [])
conflict_str = ', '.join(conflict_data)
# Default to empty dict, so we don't explode on nested optional values
control_data = config_data.get("control", dict())
template_dict = dict(\
SHORT_DESCRIPTION=config_data["short_description"],
LONG_DESCRIPTION=config_data["long_description"],
HOMEPAGE=config_data.get("homepage", ""),
SECTION=control_data.get("section", "misc"),
PRIORITY=control_data.get("priority", "low"),
ARCH=control_data.get("architecture", "all"),
DEPENDENT_PACKAGES=dependency_str,
CONFLICT_PACKAGES=conflict_str,
PACKAGE_NAME=config_data["package_name"],
MAINTAINER_NAME=config_data["maintainer_name"],
MAINTAINER_EMAIL=config_data["maintainer_email"]
)
contents = template.format(**template_dict)
return contents
def generate_copyright(config_data, template_dir):
template = get_template(template_dir, FILE_COPYRIGHT)
license_data = config_data["license"]
template_dict = dict(\
COPYRIGHT_TEXT=config_data["copyright"],
LICENSE_NAME=license_data["type"],
LICENSE_TEXT=license_data["full_text"]
)
contents = template.format(**template_dict)
return contents
def generate_symlinks(config_data):
symlink_entries = []
package_root_path = get_package_root(config_data)
symlink_data = config_data.get("symlinks", dict())
for package_rel_path, symlink_path in symlink_data.iteritems():
package_abs_path = os.path.join(package_root_path, package_rel_path)
symlink_entries.append( '%s %s' % (package_abs_path, symlink_path) )
return '\n'.join(symlink_entries)
# Helper Functions
def get_package_root(config_data):
package_name = config_data["package_name"]
return PACKAGE_ROOT_FORMAT.format(package_name=package_name)
def get_symlink_filename(config_data):
package_name = config_data["package_name"]
return FILE_SYMLINK_FORMAT.format(package_name=package_name)
def get_dependent_packages_string(debian_dependency_data):
if debian_dependency_data is None:
return ""
dependencies = []
for debian_package_name in debian_dependency_data:
dep_str = debian_package_name
if debian_dependency_data[debian_package_name].get("package_version", None):
debian_package_version = debian_dependency_data[debian_package_name].get("package_version")
dep_str += " (>= %s)" % debian_package_version
dependencies.append(dep_str)
# Leading Comma is important here
return ', ' + ', '.join(dependencies)
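# Example (derived from the logic above): given
#   {"dotnet-host": {"package_version": "1.0.0"}, "libc6": {}}
# this returns ", dotnet-host (>= 1.0.0), libc6"; the leading comma lets the
# control template append it directly to its own base dependency list.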
def load_json(json_path):
json_data = None
with open(json_path, 'r') as json_file:
json_data = json.load(json_file)
return json_data
def get_template(template_dir, name):
path = os.path.join(template_dir, name)
template_contents = None
with open(path, 'r') as template_file:
template_contents = template_file.read()
return template_contents
def write_file(contents, output_dir, name):
path = os.path.join(output_dir, name)
with open(path, 'w') as out_file:
out_file.write(contents)
return
# Tool Functions
def help_and_exit(msg):
print msg
sys.exit(1)
def print_usage():
print "Usage: config_template_generator.py [config file path] [template directory path] [output directory] (package version)"
def parse_and_validate_args():
if len(sys.argv) < 4:
print_usage()
help_and_exit("Error: Invalid Arguments")
config_path = sys.argv[1]
template_dir = sys.argv[2]
output_dir = sys.argv[3]
version_override = None
if len(sys.argv) >= 5:
version_override = sys.argv[4]
if not os.path.isfile(config_path):
help_and_exit("Error: Invalid config file path")
if not os.path.isdir(template_dir):
help_and_exit("Error: Invalid template directory path")
if not os.path.isdir(output_dir):
help_and_exit("Error: Invalid output directory path")
return (config_path, template_dir, output_dir, version_override)
def execute():
config_path, template_dir, output_dir, version_override = parse_and_validate_args()
config_data = load_json(config_path)
generate_and_write_all(config_data, template_dir, output_dir, package_version=version_override)
if __name__ == "__main__":
execute()
|
pombredanne/pyelftools
|
refs/heads/master
|
elftools/construct/lib/hex.py
|
24
|
from .py3compat import byte2int, int2byte, bytes2str
# Map an integer in the inclusive range 0-255 to its string byte representation
_printable = dict((i, ".") for i in range(256))
_printable.update((i, bytes2str(int2byte(i))) for i in range(32, 128))
def hexdump(data, linesize):
"""
data is a bytes object. The returned result is a string.
"""
prettylines = []
if len(data) < 65536:
fmt = "%%04X %%-%ds %%s"
else:
fmt = "%%08X %%-%ds %%s"
fmt = fmt % (3 * linesize - 1,)
for i in range(0, len(data), linesize):
line = data[i : i + linesize]
hextext = " ".join('%02x' % byte2int(b) for b in line)
rawtext = "".join(_printable[byte2int(b)] for b in line)
prettylines.append(fmt % (i, str(hextext), str(rawtext)))
return prettylines
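# Hedged usage sketch: for inputs shorter than 64 KiB the 16-bit offset
# format is used, so hexdump(b"abc", 16) yields a single line resembling
#   '0000  61 62 63<padding>abc'
# (exact column widths come from the fmt string computed above).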
class HexString(bytes):
"""
Represents bytes that will be hex-dumped to a string when its string
representation is requested.
"""
def __init__(self, data, linesize = 16):
self.linesize = linesize
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __str__(self):
if not self:
return "''"
sep = "\n"
return sep + sep.join(
hexdump(self, self.linesize))
|
jimboatarm/workload-automation
|
refs/heads/master
|
wlauto/external/louie/__init__.py
|
10
|
__all__ = [
'dispatcher',
'error',
'plugin',
'robustapply',
'saferef',
'sender',
'signal',
'version',
'connect',
'disconnect',
'get_all_receivers',
'reset',
'send',
'send_exact',
'send_minimal',
'send_robust',
'install_plugin',
'remove_plugin',
'Plugin',
'QtWidgetPlugin',
'TwistedDispatchPlugin',
'Anonymous',
'Any',
'All',
'Signal',
]
import louie.dispatcher, louie.error, louie.plugin, louie.robustapply, \
louie.saferef, louie.sender, louie.signal, louie.version
from louie.dispatcher import \
connect, disconnect, get_all_receivers, reset, \
send, send_exact, send_minimal, send_robust
from louie.plugin import \
install_plugin, remove_plugin, Plugin, \
QtWidgetPlugin, TwistedDispatchPlugin
from louie.sender import Anonymous, Any
from louie.signal import All, Signal
|
Averroes/raft
|
refs/heads/master
|
dialogs/AnalysisConfigDialog.py
|
11
|
#
# Analysis Config dialog
#
# Authors:
# Justin Engler
#
# Copyright (c) 2011 RAFT Team
#
# This file is part of RAFT.
#
# RAFT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAFT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAFT. If not, see <http://www.gnu.org/licenses/>.
#
from PyQt4.QtCore import (Qt, SIGNAL, QObject)
from PyQt4.QtGui import *
from utility import TreeWidgetTools
from ui import AnalysisConfig
class AnalysisConfigDialog(QDialog, AnalysisConfig.Ui_analysisConfigDialog):
""" The Analysis Config Dialog """
def __init__(self, framework, parent=None):
super(AnalysisConfigDialog, self).__init__(parent)
self.setupUi(self)
self.framework = framework
self.analyzerobj=None
#self.analyzerConfig=None
def viewItemSelected(self, index):
selecteditem=self.analyzerList.itemFromIndex(index)
#Don't do much if it's not a leaf node in the tree
if selecteditem.childCount()==0:
self.analyzerobj=selecteditem.customdata
self.selecteditem=selecteditem
self.analyzerDesc.setText(self.analyzerobj.generalInfoToHTML())
#Attempt to read config for selected analyzer from the db
configdata=self.framework.get_config_value('ANALYSIS',str(self.analyzerobj.__class__))
#print "read after analyzer click: %s"%configdata
if configdata is not None and len(configdata)>0:
self.analyzerobj.setConfiguration(configdata)
rootitem = self.analyzerConfig.invisibleRootItem()
if rootitem.childCount() > 0:
self.deleteChildren(rootitem)
#while self.analyzerConfig.topLevelItemCount > 0:
# self.analyzerConfig.removeItemWidget(self.analyzerConfig.headerItem(),1)
#self.verticalLayoutTopRight.removeWidget(self.analyzerConfig)
self.analyzerobj.generateConfigurationGui(self.analyzerConfig)
#generatedwidget=self.analyzerobj.generateConfigurationGui()
#generatedwidget.setParent(self.RightWidget)
#self.verticalLayoutTopRight.addWidget(generatedwidget)
#generatedwidget.setExpandsOnDoubleClick(False)
#self.analyzerConfig=generatedwidget
#self.analyzerConfig.show()
def deleteChildren(self,selecteditem):
numbranches=selecteditem.childCount()
if numbranches > 0:
for i in range(numbranches):
child = selecteditem.child(0)
self.deleteChildren(child)
selecteditem.removeChild(child)
del child
def closeButtonClicked(self):
#print "CLOSE"
self.close()
def saveButtonClicked(self):
#print "SAVE"
if self.analyzerobj is not None:
newsettings=TreeWidgetTools.tree_widget_to_dict(self.analyzerConfig)
#print self.analyzerobj
self.framework.set_config_value('ANALYSIS',str(self.analyzerobj.__class__),
self.analyzerobj.encodeConfiguration(newsettings))
#print newsettings, newsettings.__class__
self.analyzerobj.setConfiguration(newsettings)
checked = False
if self.selecteditem.checkState(1) == Qt.Checked:
checked = True
self.framework.set_config_value('ANALYSISENABLED',str(self.analyzerobj.__class__),checked)
def saveAllAnalyzerSettings(self):
#TODO: Add stuff for saving settings, not just enable/disable
rootitem=self.analyzerList.invisibleRootItem()
self.recursiveSaveSettings(rootitem)
def recursiveSaveSettings(self,rootitem):
childcount=rootitem.childCount()
for i in range(childcount):
currentchild=rootitem.child(i)
if hasattr(currentchild,'customdata'):
checked = False
if currentchild.checkState(1) == Qt.Checked:
checked = True
self.framework.set_config_value('ANALYSISENABLED',str(currentchild.customdata.__class__),checked)
self.recursiveSaveSettings(currentchild)
def addnodeButtonClicked(self):
"""
If the addnode button is clicked, copy the currently selected node and insert it back into the tree.
"""
#Don't do anything if there's nothing to do
if self.analyzerConfig is None or len(self.analyzerConfig.selectedItems())==0:
return
selecteditems=self.analyzerConfig.selectedItems()
copiedvalues={}
#print selecteditems[0].parent()
TreeWidgetTools.recursive_tree_widget_to_dict_helper(selecteditems[0],copiedvalues)
#Pull original keys out of copiedvalues so the changes don't interfere with the loop
tempkeylist=tuple(copiedvalues.keys())
for key in tempkeylist:
copiedvalues['COPY OF '+key]=copiedvalues[key]
del copiedvalues[key]
#if the parent is None, that means we're copying a rootish-level item. Use the invisible root to add to
virtualparent=selecteditems[0].parent() if selecteditems[0].parent() is not None else self.analyzerConfig.invisibleRootItem()
#add the copied dictionary to the tree
TreeWidgetTools.recursive_generate_tree_widget_helper(copiedvalues,virtualparent)
def delnodeButtonClicked(self):
#Don't do anything if there's nothing to do
if self.analyzerConfig is None or len(self.analyzerConfig.selectedItems())==0:
return
selecteditem=self.analyzerConfig.selectedItems()[0]
#if the parent is None, that means we're removing a rootish-level item. Use the invisible root to remove from
virtualparent=selecteditem.parent() if selecteditem.parent() is not None else self.analyzerConfig.invisibleRootItem()
virtualparent.removeChild(selecteditem)
del selecteditem
def defaultsButtonClicked(self):
self.framework.clear_config_value("ANALYSIS")
self.framework.clear_config_value("ANALYSISENABLED")
def saveAllButtonClicked(self):
self.saveAllAnalyzerSettings()
|
anand-c-goog/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/ops/bijector.py
|
3
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Bijector Ops.
An API for reversible (bijective) transformations of random variables.
## Background
Differentiable, bijective transformations of continuous random variables alter
the calculations made in the cumulative/probability distribution functions and
sample function. This module provides a standard interface for making these
manipulations.
For more details and examples, see the `Bijector` docstring.
To apply a `Bijector`, use `distributions.TransformedDistribution`.
## Bijectors
@@Bijector
@@Chain
@@Exp
@@Identity
@@Inline
@@Invert
@@ScaleAndShift
@@SigmoidCentered
@@SoftmaxCentered
@@Softplus
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import re
import numpy as np
import six
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
@six.add_metaclass(abc.ABCMeta)
class Bijector(object):
"""Interface for transforming a `Distribution` via `TransformedDistribution`.
A `Bijector` implements a bijective, differentiable function by transforming
an input `Tensor`. The output `Tensor` shape is constrained by the input
`sample`, `batch`, and `event` shape. A `Bijector` is characterized by three
operations:
1. Forward Evaluation
Useful for turning one random outcome into another random outcome from a
different distribution.
2. Inverse Evaluation
Useful for "reversing" a transformation to compute one probability in
terms of another.
3. (log o det o Jacobian o inverse)(x)
"The log of the determinant of the matrix of all first-order partial
derivatives of the inverse function."
Useful for inverting a transformation to compute one probability in terms
of another. Geometrically, the det(Jacobian) is the volume of the
transformation and is used to scale the probability.
By convention, transformations of random variables are named in terms of the
forward transformation. The forward transformation creates samples, the
inverse is useful for computing probabilities.
Example Use:
- Basic properties:
```python
x = ... # A tensor.
# Evaluate forward transformation.
fwd_x = my_bijector.forward(x)
x == my_bijector.inverse(fwd_x)
x != my_bijector.forward(fwd_x) # Not equal because g(x) != g(g(x)).
```
- Computing a log-likelihood:
```python
def transformed_log_pdf(bijector, log_pdf, x):
return (bijector.inverse_log_det_jacobian(x) +
log_pdf(bijector.inverse(x)))
```
- Transforming a random outcome:
```python
def transformed_sample(bijector, x):
return bijector.forward(x)
```
Example transformations:
- "Exponential"
```
Y = g(X) = exp(X)
X ~ Normal(0, 1) # Univariate.
```
Implies:
```
g^{-1}(Y) = log(Y)
|Jacobian(g^{-1})(y)| = 1 / y
Y ~ LogNormal(0, 1), i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
= (1 / y) Normal(log(y); 0, 1)
```
- "ScaleAndShift"
```
Y = g(X) = sqrtSigma * X + mu
X ~ MultivariateNormal(0, I_d)
```
Implies:
```
g^{-1}(Y) = inv(sqrtSigma) * (Y - mu)
|Jacobian(g^{-1})(y)| = det(inv(sqrtSigma))
Y ~ MultivariateNormal(mu, sqrtSigma) , i.e.,
prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
            = det(sqrtSigma)^(-1) *
MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d)
```
Example of why a `Bijector` needs to understand sample, batch, event
partitioning:
- Consider the `Exp` `Bijector` applied to a `Tensor` which has sample, batch,
and event (S, B, E) shape semantics. Suppose
the `Tensor`'s partitioned-shape is `(S=[4], B=[2], E=[3, 3])`.
For `Exp`, the shape of the `Tensor` returned by `forward` and `inverse` is
unchanged, i.e., `[4, 2, 3, 3]`. However the shape returned by
`inverse_log_det_jacobian` is `[4, 2]` because the Jacobian is a reduction
over the event dimensions.
Subclass Requirements:
- Typically subclasses implement `_forward` and one or both of:
- `_inverse`, `_inverse_log_det_jacobian`,
- `_inverse_and_inverse_log_det_jacobian`.
- If the `Bijector`'s use is limited to `TransformedDistribution` (or friends
like `QuantizedDistribution`) then depending on your use, you may not need
  to implement all of the `_forward` and `_inverse` functions. Examples:
1. Sampling (e.g., `sample`) only requires `_forward`.
2. Probability functions (e.g., `prob`, `cdf`, `survival`) only require
`_inverse` (and related).
3. Only calling probability functions on the output of `sample` means
`_inverse` can be implemented as a cache lookup.
See `Example Use` [above] which shows how these functions are used to
transform a distribution. (Note: `_forward` could theoretically be
implemented as a cache lookup but this would require controlling the
underlying sample generation mechanism.)
- If computation can be shared among `_inverse` and
`_inverse_log_det_jacobian` it is preferable to implement
`_inverse_and_inverse_log_det_jacobian`. This usually reduces
graph-construction overhead because a `Distribution`'s implementation of
`log_prob` will need to evaluate both the inverse Jacobian as well as the
inverse function.
- If an additional use case needs just `inverse` or just
`inverse_log_det_jacobian` then he or she may also wish to implement these
functions to avoid computing the `inverse_log_det_jacobian` or the
`inverse`, respectively.
Tips for implementing `_inverse` and `_inverse_log_det_jacobian`:
- As case 3 [above] indicates, under some circumstances the inverse function
can be implemented as a cache lookup.
- The inverse `log o det o Jacobian` can be implemented as the negative of the
forward `log o det o Jacobian`. This is useful if the `inverse` is
implemented as a cache or the inverse Jacobian is computationally more
expensive. The following demonstrates the suggested implementation.
```python
  def _inverse_and_inverse_log_det_jacobian(self, y):
    x = ...  # implement inverse, possibly via cache.
    return x, -self._forward_log_det_jacobian(x)  # Note negation.
  ```
  By overriding the `_inverse_and_inverse_log_det_jacobian` function we have access to
the inverse in one call.
The correctness of this approach can be seen from the following claim.
- Claim:
Assume `Y=g(X)` is a bijection whose derivative exists and is nonzero
for its domain, i.e., `d/dX g(X)!=0`. Then:
```none
(log o det o jacobian o g^{-1})(Y) = -(log o det o jacobian o g)(X)
```
- Proof:
From the bijective, nonzero differentiability of `g`, the
[inverse function theorem](
https://en.wikipedia.org/wiki/Inverse_function_theorem)
implies `g^{-1}` is differentiable in the image of `g`.
Applying the chain rule to `y = g(x) = g(g^{-1}(y))` yields
`I = g'(g^{-1}(y))*g^{-1}'(y)`.
    The same theorem also implies `g^{-1}'` is non-singular therefore:
`inv[ g'(g^{-1}(y)) ] = g^{-1}'(y)`.
The claim follows from [properties of determinant](
https://en.wikipedia.org/wiki/Determinant#Multiplicativity_and_matrix_groups).
- If possible, prefer a direct implementation of the inverse Jacobian. This
should have superior numerical stability and will often share subgraphs with
the `_inverse` implementation.
"""
@abc.abstractmethod
def __init__(self,
batch_ndims=None,
event_ndims=None,
parameters=None,
is_constant_jacobian=False,
validate_args=False,
dtype=None,
name=None):
"""Constructs Bijector.
A `Bijector` transforms random variables into new random variables.
Examples:
```python
# Create the Y = g(X) = X transform which operates on 4-Tensors of vectors.
identity = Identity(batch_ndims=4, event_ndims=1)
# Create the Y = g(X) = exp(X) transform which operates on matrices.
exp = Exp(batch_ndims=0, event_ndims=2)
```
See `Bijector` subclass docstring for more details and specific examples.
Args:
batch_ndims: number of dimensions associated with batch coordinates.
event_ndims: number of dimensions associated with event coordinates.
parameters: Dictionary of parameters used by this `Bijector`
is_constant_jacobian: `Boolean` indicating that the Jacobian is not a
function of the input.
validate_args: `Boolean`, default `False`. Whether to validate input with
asserts. If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
enforced.
name: The name to give Ops created by the initializer.
"""
if batch_ndims is None or event_ndims is None:
self._shaper = None # Apparently subclass will create.
else:
self._shaper = _DistributionShape(
batch_ndims=batch_ndims,
event_ndims=event_ndims,
validate_args=validate_args)
self._parameters = parameters or {}
self._is_constant_jacobian = is_constant_jacobian
self._validate_args = validate_args
self._dtype = dtype
if name:
self._name = name
else:
# We want the default convention to be snake_case rather than CamelCase
# since `Chain` uses bijector.name as the condition_kwargs dictionary key.
def camel_to_snake(name):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
self._name = camel_to_snake(type(self).__name__)
@property
def shaper(self):
"""Returns shape object used to manage shape constraints."""
return self._shaper
@property
def parameters(self):
"""Returns this `Bijector`'s parameters as a name/value dictionary."""
return self._parameters
@property
def is_constant_jacobian(self):
"""Returns true iff the Jacobian is not a function of x.
Note: Jacobian is either constant for both forward and inverse or neither.
Returns:
`Boolean`.
"""
return self._is_constant_jacobian
@property
def validate_args(self):
"""Returns True if Tensor arguments will be validated."""
return self._validate_args
@property
def dtype(self):
"""dtype of `Tensor`s transformable by this distribution."""
return self._dtype
@property
def name(self):
"""Returns the string name of this `Bijector`."""
return self._name
def _forward(self, x):
raise NotImplementedError("forward is not implemented.")
def forward(self, x, name="forward", **condition_kwargs):
"""Returns the forward `Bijector` evaluation, i.e., X = g(Y).
Args:
x: `Tensor`. The input to the "forward" evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
NotImplementedError: if `_forward` is not implemented.
"""
with self._name_scope(name, [x]):
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
return self._forward(x, **condition_kwargs)
def _inverse(self, y):
raise NotImplementedError("inverse is not implemented")
def inverse(self, y, name="inverse", **condition_kwargs):
"""Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).
Args:
y: `Tensor`. The input to the "inverse" evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_inverse` nor
`_inverse_and_inverse_log_det_jacobian` are implemented.
"""
with self._name_scope(name, [y]):
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
try:
return self._inverse(y, **condition_kwargs)
except NotImplementedError as original_error:
# Since _inverse was not implemented, try to see if it's implemented
# by the _inverse_and_inverse_log_det_jacobian member.
try:
return self._inverse_and_inverse_log_det_jacobian(
y, **condition_kwargs)[0]
except NotImplementedError:
raise original_error
def _inverse_log_det_jacobian(self, y):
raise NotImplementedError("inverse_log_det_jacobian is not implemented.")
def inverse_log_det_jacobian(
self, y, name="inverse_log_det_jacobian", **condition_kwargs):
"""Returns the (log o det o Jacobian o inverse)(y).
Mathematically, returns: `log(det(dX/dY))(Y)`. (Recall that: `X=g^{-1}(Y)`.)
Note that `forward_log_det_jacobian` is the negative of this function.
Args:
y: `Tensor`. The input to the "inverse" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_inverse_log_det_jacobian` nor
`_inverse_and_inverse_log_det_jacobian` are implemented.
"""
with self._name_scope(name, [y]):
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
try:
return self._inverse_log_det_jacobian(y, **condition_kwargs)
except NotImplementedError as original_error:
# Since _inverse_log_det_jacobian was not implemented, try to see if
# it's implemented by the _inverse_and_inverse_log_det_jacobian member.
try:
return self._inverse_and_inverse_log_det_jacobian(
y, **condition_kwargs)[1]
except NotImplementedError:
raise original_error
def _inverse_and_inverse_log_det_jacobian(self, y):
raise NotImplementedError(
"inverse_and_inverse_log_det_jacobian is not implemented.")
def inverse_and_inverse_log_det_jacobian(
self, y, name="inverse_and_inverse_log_det_jacobian", **condition_kwargs):
"""Returns both the inverse evaluation and inverse_log_det_jacobian.
Enables possibly more efficient calculation when both inverse and
corresponding Jacobian are needed.
See `inverse()`, `inverse_log_det_jacobian()` for more details.
Args:
y: `Tensor`. The input to the "inverse" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
`self.dtype`.
NotImplementedError: if neither `_inverse_and_inverse_log_det_jacobian`
nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented.
"""
with self._name_scope(name, [y]):
y = ops.convert_to_tensor(y, name="y")
self._maybe_assert_dtype(y)
try:
return self._inverse_and_inverse_log_det_jacobian(
y, **condition_kwargs)
except NotImplementedError as original_error:
# Since _inverse_and_inverse_log_det_jacobian was not implemented, try
# to see if we can separately use _inverse and
# _inverse_log_det_jacobian members.
try:
return (self._inverse(y, **condition_kwargs),
self._inverse_log_det_jacobian(y, **condition_kwargs))
except NotImplementedError:
raise original_error
def _forward_log_det_jacobian(self, x):
raise NotImplementedError(
"forward_log_det_jacobian is not implemented.")
def forward_log_det_jacobian(
self, x, name="forward_log_det_jacobian", **condition_kwargs):
"""Returns both the forward_log_det_jacobian.
Args:
x: `Tensor`. The input to the "forward" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor`.
Raises:
      TypeError: if `self.dtype` is specified and `x.dtype` is not
        `self.dtype`.
NotImplementedError: if neither `_forward_log_det_jacobian`
nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented.
"""
with self._name_scope(name, [x]):
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
try:
return self._forward_log_det_jacobian(x, **condition_kwargs)
except NotImplementedError as original_error:
try:
y = self.inverse(x, **condition_kwargs)
return -self.inverse_log_det_jacobian(y, **condition_kwargs)
except NotImplementedError:
raise original_error
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + list(self.parameters.values()))) as scope:
yield scope
def _maybe_assert_dtype(self, x):
"""Helper to check dtype when self.dtype is known."""
if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype:
raise TypeError("Input had dtype %s but expected %s." %
(self.dtype, x.dtype))
class Inline(Bijector):
# pylint: disable=line-too-long
"""Bijector constructed from callables implementing forward, inverse, and inverse_log_det_jacobian.
Example Use:
```python
exp = Inline(
forward_fn=tf.exp,
inverse_fn=tf.log,
inverse_log_det_jacobian_fn=(
lambda y: -tf.reduce_sum(tf.log(y), reduction_indices=-1)),
name="Exp")
```
The above example is equivalent to the `Bijector` `Exp(event_ndims=1)`.
"""
# pylint: enable=line-too-long
def __init__(self,
forward_fn=None,
inverse_fn=None,
inverse_log_det_jacobian_fn=None,
forward_log_det_jacobian_fn=None,
is_constant_jacobian=False,
validate_args=False,
name="inline"):
"""Creates a `Bijector` from callables.
Args:
forward_fn: Python callable implementing the forward transformation.
inverse_fn: Python callable implementing the inverse transformation.
inverse_log_det_jacobian_fn: Python callable implementing the
log o det o jacobian of the inverse transformation.
forward_log_det_jacobian_fn: Python callable implementing the
log o det o jacobian of the forward transformation.
is_constant_jacobian: `Boolean` indicating that the Jacobian is constant
for all input arguments.
      validate_args: `Boolean` indicating whether arguments should be checked for
correctness.
name: `String`, name given to ops managed by this object.
"""
super(Inline, self).__init__(
batch_ndims=0,
event_ndims=0,
is_constant_jacobian=is_constant_jacobian,
validate_args=validate_args,
name=name)
self._forward_fn = forward_fn
self._inverse_fn = inverse_fn
self._inverse_log_det_jacobian_fn = inverse_log_det_jacobian_fn
self._forward_log_det_jacobian_fn = forward_log_det_jacobian_fn
def _forward(self, x, **condition_kwargs):
if not callable(self._forward_fn):
raise NotImplementedError(
"forward_fn is not a callable function.")
return self._forward_fn(x, **condition_kwargs)
def _inverse(self, y, **condition_kwargs):
if not callable(self._inverse_fn):
raise NotImplementedError(
"inverse_fn is not a callable function.")
return self._inverse_fn(y, **condition_kwargs)
def _inverse_log_det_jacobian(self, y, **condition_kwargs):
if not callable(self._inverse_log_det_jacobian_fn):
raise NotImplementedError(
"inverse_log_det_jacobian_fn is not a callable function.")
return self._inverse_log_det_jacobian_fn(y, **condition_kwargs)
  def _forward_log_det_jacobian(self, x, **condition_kwargs):
    if not callable(self._forward_log_det_jacobian_fn):
      raise NotImplementedError(
          "forward_log_det_jacobian_fn is not a callable function.")
    return self._forward_log_det_jacobian_fn(x, **condition_kwargs)
class Invert(Bijector):
"""Bijector which inverts another Bijector.
Example Use: [ExpGammaDistribution (see Background & Context)](
https://reference.wolfram.com/language/ref/ExpGammaDistribution.html)
models `Y=log(X)` where `X ~ Gamma`.
```python
exp_gamma_distribution = TransformedDistribution(
Gamma(alpha=1., beta=2.),
    bijector.Invert(bijector.Exp()))
```
"""
def __init__(self, bijector, validate_args=False, name=None):
"""Creates a `Bijector` which swaps the meaning of `inverse` and `forward`.
Note: An inverted bijector's `inverse_log_det_jacobian` is often more
efficient if the base bijector implements `_forward_log_det_jacobian`. If
`_forward_log_det_jacobian` is not implemented then the following code is
used:
```python
y = self.inverse(x, **condition_kwargs)
return -self.inverse_log_det_jacobian(y, **condition_kwargs)
```
Args:
bijector: Bijector instance.
      validate_args: `Boolean` indicating whether arguments should be checked for
correctness.
name: `String`, name given to ops managed by this object.
"""
self._bijector = bijector
super(Invert, self).__init__(
parameters=bijector.parameters,
is_constant_jacobian=bijector.is_constant_jacobian,
validate_args=validate_args,
dtype=bijector.dtype,
name=name or "_".join(["invert", bijector.name]))
self._shaper = bijector.shaper
@property
def bijector(self):
return self._bijector
def _forward(self, x, **condition_kwargs):
return self.bijector.inverse(x, **condition_kwargs)
def _inverse_and_inverse_log_det_jacobian(self, y, **condition_kwargs):
return (self.bijector.forward(y, **condition_kwargs),
self.bijector.forward_log_det_jacobian(y, **condition_kwargs))
def _forward_log_det_jacobian(self, x, **condition_kwargs):
return self.bijector.inverse_log_det_jacobian(x, **condition_kwargs)
class Chain(Bijector):
"""Bijector which applies a sequence of bijectors.
Example Use:
```python
chain = Chain([Exp(), Softplus()], name="one_plus_exp")
```
Results in:
* Forward:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).forward(x)
= exp.forward(softplus.forward(x))
= tf.exp(tf.log(1. + tf.exp(x)))
= 1. + tf.exp(x)
```
* Inverse:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).inverse(y)
= softplus.inverse(exp.inverse(y))
= tf.log(tf.exp(tf.log(y)) - 1.)
= tf.log(y - 1.)
```
"""
def __init__(self, bijectors=(), validate_args=False, name=None):
"""Instantiates `Chain` bijector.
Args:
bijectors: Python list of bijector instances. An empty list makes this
bijector equivalent to the `Identity` bijector.
      validate_args: `Boolean` indicating whether arguments should be checked for
correctness.
name: `String`, name given to ops managed by this object. Default: E.g.,
`Chain([Exp(), Softplus()]).name == "chain_of_exp_of_softplus"`.
Raises:
ValueError: if bijectors have different dtypes.
"""
self._bijectors = bijectors
dtype = list(set([b.dtype for b in bijectors]))
    if len(dtype) > 2:
      raise ValueError("incompatible dtypes: %s" % dtype)
    elif len(dtype) == 2:
      if None not in dtype:
        raise ValueError("incompatible dtypes: %s" % dtype)
      dtype = dtype[1] if dtype[0] is None else dtype[0]
elif len(dtype) == 1:
dtype = dtype[0]
else:
dtype = None
super(Chain, self).__init__(
parameters=dict(("=".join([b.name, k]), v)
for b in bijectors
for k, v in b.parameters.items()),
is_constant_jacobian=all([b.is_constant_jacobian
for b in bijectors]),
validate_args=validate_args,
dtype=dtype,
name=name or ("identity" if not bijectors else
"_of_".join(["chain"] + [b.name for b in bijectors])))
@property
def bijectors(self):
return self._bijectors
def _forward(self, x, **condition_kwargs):
y = x
for b in reversed(self.bijectors):
y = b.forward(y, **condition_kwargs.get(b.name, {}))
return y
def _inverse_and_inverse_log_det_jacobian(self, y, **condition_kwargs):
x = y
ildj = constant_op.constant(0., dtype=x.dtype,
name="inverse_log_det_jacobian")
for b in self.bijectors:
x, j = b.inverse_and_inverse_log_det_jacobian(
x, **condition_kwargs.get(b.name, {}))
ildj += j
return x, ildj
def _forward_log_det_jacobian(self, x, **condition_kwargs):
y = x
fldj = constant_op.constant(0., dtype=x.dtype,
name="forward_log_det_jacobian")
for b in reversed(self.bijectors):
bijector_condition_kwargs = condition_kwargs.get(b.name, {})
fldj += b.forward_log_det_jacobian(y, **bijector_condition_kwargs)
y = b.forward(y, **bijector_condition_kwargs)
return fldj
class Identity(Bijector):
"""Bijector which computes Y = g(X) = X.
Example Use:
```python
# Create the Y=g(X)=X transform which is intended for Tensors with 1 batch
# ndim and 1 event ndim (i.e., vector of vectors).
identity = Identity(batch_ndims=1, event_ndims=1)
x = [[1., 2],
[3, 4]]
x == identity.forward(x) == identity.inverse(x)
```
"""
def __init__(self, validate_args=False, name="identity"):
super(Identity, self).__init__(
batch_ndims=0,
event_ndims=0,
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
self._is_constant_jacobian = True
def _forward(self, x):
return x
def _inverse_and_inverse_log_det_jacobian(self, y):
return y, constant_op.constant(0., dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(0., dtype=x.dtype)
class Exp(Bijector):
"""Bijector which computes Y = g(X) = exp(X).
Example Use:
```python
# Create the Y=g(X)=exp(X) transform which works only on Tensors with 1
# batch ndim and 2 event ndims (i.e., vector of matrices).
exp = Exp(batch_ndims=1, event_ndims=2)
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
exp(x) == exp.forward(x)
log(x) == exp.inverse(x)
```
Note: the exp(.) is applied element-wise but the Jacobian is a reduction
over the event space.
"""
def __init__(self,
event_ndims=0,
validate_args=False,
name="exp"):
"""Instantiates the `Exp` bijector.
Args:
event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution.
      validate_args: `Boolean` indicating whether arguments should be checked for
correctness.
name: `String` name given to ops managed by this object.
"""
super(Exp, self).__init__(
batch_ndims=0,
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _forward(self, x):
return math_ops.exp(x)
def _inverse_and_inverse_log_det_jacobian(self, y):
x = math_ops.log(y)
return x, -self._forward_log_det_jacobian(x)
def _forward_log_det_jacobian(self, x):
if self.shaper is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
_, _, event_dims = self.shaper.get_dims(x)
return math_ops.reduce_sum(x, reduction_indices=event_dims)
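# Numeric sketch of the event-space reduction above (illustrative, not part
# of the original module): with event_ndims=1 and x = [x1, x2],
# forward_log_det_jacobian(x) = x1 + x2, so the inverse Jacobian term at
# y = exp(x) is -(log(y1) + log(y2)) = -(x1 + x2).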
class ScaleAndShift(Bijector):
"""Bijector which computes Y = g(X; shift, scale) = scale * X + shift.
Example Use:
```python
# No batch, scalar.
mu = 0 # shape=[]
sigma = 1 # shape=[]
b = ScaleAndShift(shift=mu, scale=sigma)
# b.shaper.batch_ndims == 0
# b.shaper.event_ndims == 0
# One batch, scalar.
mu = ... # shape=[b], b>0
sigma = ... # shape=[b], b>0
b = ScaleAndShift(shift=mu, scale=sigma)
# b.shaper.batch_ndims == 1
# b.shaper.event_ndims == 0
# No batch, multivariate.
mu = ... # shape=[d], d>0
sigma = ... # shape=[d, d], d>0
b = ScaleAndShift(shift=mu, scale=sigma, event_ndims=1)
# b.shaper.batch_ndims == 0
# b.shaper.event_ndims == 1
# (B1*B2*...*Bb)-batch, multivariate.
mu = ... # shape=[B1,...,Bb, d], b>0, d>0
sigma = ... # shape=[B1,...,Bb, d, d], b>0, d>0
b = ScaleAndShift(shift=mu, scale=sigma, event_ndims=1)
# b.shaper.batch_ndims == b
# b.shaper.event_ndims == 1
# Mu is broadcast:
mu = 1
sigma = [I, I] # I is a 3x3 identity matrix.
b = ScaleAndShift(shift=mu, scale=sigma, event_ndims=1)
x = numpy.ones(S + sigma.shape)
b.forward(x) # == x + 1
```
"""
def __init__(self,
shift,
scale,
event_ndims=0,
validate_args=False,
name="scale_and_shift"):
"""Instantiates the `Exp` bijector.
Args:
shift: `Tensor` used to shift input, i.e., `Y = g(X) = scale * X + shift`.
scale: `Tensor` used to scale input, i.e., `Y = g(X) = scale * X + shift`.
event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution.
validate_args: `Boolean` indicating whether arguments should be checked for
correctness.
name: `String` name given to ops managed by this object.
"""
self._parameters = {}
self._name = name
with self._name_scope("init", values=[shift, scale, event_ndims]):
self._shift = ops.convert_to_tensor(shift, name="shift")
self._scale = ops.convert_to_tensor(scale, name="scale")
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
if self.shift.dtype.base_dtype != self.scale.dtype.base_dtype:
raise TypeError("%s.dtype=%s does not match %s.dtype=%s" %
(self.shift.name, self.shift.dtype, self.scale.name,
self.scale.dtype))
if event_ndims.dtype.base_dtype != dtypes.int32.base_dtype:
raise TypeError("%s.dtype=%s does not match %s" %
(event_ndims.name, event_ndims.dtype, dtypes.int32))
self._scale, batch_ndims = self._process_scale(self.scale, event_ndims)
super(ScaleAndShift, self).__init__(
batch_ndims=batch_ndims,
event_ndims=event_ndims,
parameters={"shift": self.shift, "scale": self.scale},
is_constant_jacobian=True,
validate_args=validate_args,
name=name)
def _process_scale(self, scale, event_ndims):
"""Helper to __init__ which gets scale in batch-ready form.
This function expands dimensions of `scale` according to the following
table:
event_ndims
scale.ndims 0 1
0 [1]+S+[1,1] "silent error"
1 [ ]+S+[1,1] "silent error"
2 [ ]+S+[1,1] [1]+S+[ ]
3 [ ]+S+[1,1] [ ]+S+[ ]
... (same) (same)
The idea is that we want to convert `scale` into something which can always
work for, say, the left-hand argument of `batch_matmul`.
Args:
scale: `Tensor`.
event_ndims: `Tensor` (0D, `int32`).
Returns:
scale: `Tensor` with dims expanded according to [above] table.
batch_ndims: `Tensor` (0D, `int32`). The ndims of the `batch` portion.
"""
ndims = array_ops.rank(scale)
left = math_ops.select(
math_ops.reduce_any([
math_ops.reduce_all([
math_ops.equal(ndims, 0),
math_ops.equal(event_ndims, 0)
]),
math_ops.reduce_all([
math_ops.equal(ndims, 2),
math_ops.equal(event_ndims, 1)
])]), 1, 0)
right = math_ops.select(math_ops.equal(event_ndims, 0), 2, 0)
pad = array_ops.concat(0, (
array_ops.ones([left], dtype=dtypes.int32),
array_ops.shape(scale),
array_ops.ones([right], dtype=dtypes.int32)))
scale = array_ops.reshape(scale, pad)
batch_ndims = ndims - 2 + right
return scale, batch_ndims
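# Worked instances of the table above:
# - scale.shape == [b], event_ndims == 0: left == 0, right == 2, so scale is
#   reshaped to [b, 1, 1] and batch_ndims == 1 - 2 + 2 == 1.
# - scale.shape == [d, d], event_ndims == 1: left == 1, right == 0, so scale
#   is reshaped to [1, d, d] and batch_ndims == 2 - 2 + 0 == 0.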
@property
def shift(self):
return self._shift
@property
def scale(self):
return self._scale
def _forward(self, x):
x, sample_shape = self.shaper.make_batch_of_event_sample_matrices(x)
x = math_ops.batch_matmul(self.scale, x)
x = self.shaper.undo_make_batch_of_event_sample_matrices(x, sample_shape)
x += self.shift
return x
def _inverse(self, y):
x = y - self.shift
x, sample_shape = self.shaper.make_batch_of_event_sample_matrices(x)
x = linalg_ops.matrix_triangular_solve(self.scale, x)
x = self.shaper.undo_make_batch_of_event_sample_matrices(x, sample_shape)
return x
def _inverse_log_det_jacobian(self, y): # pylint: disable=unused-argument
return -math_ops.reduce_sum(
math_ops.log(array_ops.matrix_diag_part(self.scale)),
reduction_indices=[-1])
def _forward_log_det_jacobian(self, x): # pylint: disable=unused-argument
return -self._inverse_log_det_jacobian(x)
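# A minimal NumPy sketch (a hypothetical helper, not part of this API) of the
# no-batch multivariate case above. Note the inverse uses a triangular solve
# and the log-det uses only the diagonal, so `scale` is assumed triangular.
def _scale_and_shift_sketch():
  scale = np.array([[2., 0.],
                    [1., 3.]])         # lower-triangular scale
  shift = np.array([0.5, -0.5])
  x = np.array([1., 2.])
  y = scale.dot(x) + shift             # forward: Y = scale @ X + shift
  fldj = np.log(np.diag(scale)).sum()  # log|det(scale)| for triangular scale
  return y, fldj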
class Softplus(Bijector):
"""Bijector which computes `Y = g(X) = Log[1 + exp(X)]`.
The softplus `Bijector` has the following two useful properties:
* The domain is the positive real numbers
* `softplus(x) approx x`, for large `x`, so it does not overflow as easily as
the `Exp` `Bijector`.
Example Use:
```python
# Create the Y=g(X)=softplus(X) transform which operates on Tensors with 2
# event ndims (i.e., matrices); remaining dims are treated as sample/batch.
softplus = Softplus(event_ndims=2)
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
log(1 + exp(x)) == softplus.forward(x)
log(exp(x) - 1) == softplus.inverse(x)
```
Note: log(.) and exp(.) are applied element-wise but the Jacobian is a
reduction over the event space.
"""
def __init__(self,
event_ndims=0,
validate_args=False,
name="softplus"):
super(Softplus, self).__init__(
batch_ndims=0,
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _forward(self, x):
return nn_ops.softplus(x)
def _inverse_and_inverse_log_det_jacobian(self, y):
# The most stable inverse of softplus is not the most direct one.
# y = softplus(x) = Log[1 + exp{x}], (which means y > 0).
# ==> exp{y} = 1 + exp{x}
# ==> x = Log[exp{y} - 1]
# = Log[(exp{y} - 1) / exp{y}] + Log[exp{y}]
# = Log[(1 - exp{-y}) / 1] + Log[exp{y}]
# = Log[1 - exp{-y}] + y
# Recalling y > 0, you see that this is more stable than Log[exp{y} - 1].
#
# Stable inverse log det jacobian.
# Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
# ==> dX/dY = exp{Y} / (exp{Y} - 1)
# = 1 / (1 - exp{-Y}),
# which is the most stable for Y > 0.
if self.shaper is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
_, _, event_dims = self.shaper.get_dims(y)
log_one_minus_exp_neg = math_ops.log(1. - math_ops.exp(-y))
x = y + log_one_minus_exp_neg
ildj = -math_ops.reduce_sum(
log_one_minus_exp_neg, reduction_indices=event_dims)
return x, ildj
def _forward_log_det_jacobian(self, x): # pylint: disable=unused-argument
if self.shaper is None:
raise ValueError("Jacobian cannot be computed with unknown event_ndims")
_, _, event_dims = self.shaper.get_dims(x)
return -math_ops.reduce_sum(
nn_ops.softplus(-x), reduction_indices=event_dims)
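# A minimal NumPy sketch (a hypothetical helper, not part of this API) of the
# stability argument above: log(exp(y) - 1) overflows once exp(y) does
# (y >= ~710 in float64), while the rearranged form stays finite.
def _softplus_inverse_sketch(y=800.0):
  return y + np.log1p(-np.exp(-y))  # the stable form used in the method above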
class SoftmaxCentered(Bijector):
"""Bijector which computes `Y = g(X) = exp([X 0]) / sum(exp([X 0]))`.
To implement [softmax](https://en.wikipedia.org/wiki/Softmax_function) as a
bijection, the forward transformation appends a value to the input and the
inverse removes this coordinate. The appended coordinate represents a pivot,
e.g., `softmax(x) = exp(x-c) / sum(exp(x-c))` where `c` is the implicit last
coordinate.
Because we append a coordinate, this bijector only supports `event_ndims` in
`[0, 1]`, i.e., scalars and vectors.
Example Use:
```python
bijector.SoftmaxCentered(event_ndims=1).forward(tf.log([2, 3, 4]))
# Result: [0.2, 0.3, 0.4, 0.1]
# Extra result: 0.1
bijector.SoftmaxCentered(event_ndims=1).inverse([0.2, 0.3, 0.4, 0.1])
# Result: tf.log([2, 3, 4])
# Extra coordinate removed.
```
At first blush it may seem like the [Invariance of domain](
https://en.wikipedia.org/wiki/Invariance_of_domain) theorem implies this
implementation is not a bijection. However, the appended dimension
makes the (forward) image non-open and the theorem does not directly apply.
"""
def __init__(self,
event_ndims=0,
validate_args=False,
name="softmax_centered"):
self._parameters = {}
self._name = name
with self._name_scope("init", values=[event_ndims]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
event_ndims = tensor_util.constant_value(event_ndims)
if event_ndims is None or event_ndims not in [0, 1]:
raise ValueError("`event_ndims` must be a TF constant which is 0 or 1")
self._static_event_ndims = event_ndims
super(SoftmaxCentered, self).__init__(
batch_ndims=0, # We'll regard all non-event dims as sample dims.
event_ndims=event_ndims,
validate_args=validate_args,
name=name)
def _forward(self, x):
y = x
# Pad the event dimension with a zeros vector. We need this because it lets
# us infer the scale in the inverse function.
if self._static_event_ndims == 0:
y = array_ops.expand_dims(y, dim=-1)
zeros = array_ops.zeros_like(y)
else:
shape = array_ops.concat(0, (array_ops.shape(x)[:-1], [1]))
zeros = array_ops.zeros(shape, dtype=y.dtype)
y = array_ops.concat(array_ops.rank(y)-1, (y, zeros))
# Set shape hints.
if x.get_shape().ndims is not None:
shape = x.get_shape().as_list()
if self._static_event_ndims == 0:
shape += [2]
elif shape[-1] is not None:
shape[-1] += 1
shape = tensor_shape.TensorShape(shape)
y.get_shape().assert_is_compatible_with(shape)
y.set_shape(shape)
# Since we only support event_ndims in [0, 1] and we do padding, we always
# reduce over the last dimension, i.e., dim=-1 (which is the default).
return nn_ops.softmax(y)
def _inverse(self, y):
# To derive the inverse mapping note that:
# y[i] = exp(x[i]) / normalization
# and
# y[end] = 1 / normalization.
# Thus:
# x[i] = log(exp(x[i])) - log(y[end]) - log(normalization)
# = log(exp(x[i])/normalization) - log(y[end])
# = log(y[i]) - log(y[end])
shape = (np.asarray(y.get_shape().as_list(), dtype=np.int32)
if y.get_shape().is_fully_defined()
else array_ops.shape(y, name="shape"))
ndims = y.get_shape().ndims or math_ops.rank(y, name="ndims")
# Do this first to make sure CSE catches that it'll happen again in
# _inverse_log_det_jacobian.
x = math_ops.log(y)
# We now extract the last coordinate of the rightmost dimension.
# Our trick is to slice from [0,0,...,shape[-1]-1] to shape[:-1]+[1].
begin = array_ops.one_hot(indices=ndims-1,
depth=ndims,
on_value=shape[-1]-np.array(1, dtype=shape.dtype),
dtype=shape.dtype)
size = array_ops.concat(0, (shape[:-1], np.asarray([1], dtype=shape.dtype)))
log_normalization = -array_ops.slice(x, begin, size)
# Here we slice out all but the last coordinate; see above for idea.
begin = array_ops.zeros_like(shape)
size = array_ops.concat(0, (shape[:-1], [shape[-1]-1]))
x = array_ops.slice(x, begin, size)
x += log_normalization
if self._static_event_ndims == 0:
x = array_ops.squeeze(x, squeeze_dims=[ndims-1])
# Set shape hints.
if y.get_shape().ndims is not None:
shape = y.get_shape().as_list()
if self._static_event_ndims == 0:
shape = shape[:-1]
elif shape[-1] is not None:
shape[-1] -= 1
shape = tensor_shape.TensorShape(shape)
x.get_shape().assert_is_compatible_with(shape)
x.set_shape(shape)
return x
def _inverse_log_det_jacobian(self, y):
# WLOG, consider the vector case:
# x = log(y[:-1]) - log(y[-1])
# where,
# y[-1] = 1 - sum(y[:-1]).
# We have:
# det{ dX/dY } = det{ diag(1 ./ y[:-1]) + 1 / y[-1] }
# = det{ inv{ diag(y[:-1]) - y[:-1]' y[:-1] } } (1)
# = 1 / det{ diag(y[:-1]) - y[:-1]' y[:-1] }
# = 1 / { (1 + y[:-1]' inv(diag(y[:-1])) y[:-1]) *
# det(diag(y[:-1])) } (2)
# = 1 / { y[-1] prod(y[:-1]) }
# = 1 / prod(y)
# (1) - https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula
# or by noting that det{ dX/dY } = 1 / det{ dY/dX } from Bijector
# docstring "Tip".
# (2) - https://en.wikipedia.org/wiki/Matrix_determinant_lemma
return -math_ops.reduce_sum(math_ops.log(y), reduction_indices=-1)
def _forward_log_det_jacobian(self, x):
if self._static_event_ndims == 0:
return x - 2. * nn_ops.softplus(x)
else:
# This code is similar to nn_ops.log_softmax but different because we have
# an implicit zero column to handle. I.e., instead of:
# reduce_sum(logits - reduce_sum(exp(logits), dim))
# we must do:
# log_normalization = 1 + reduce_sum(exp(logits))
# -log_normalization + reduce_sum(logits - log_normalization)
log_normalization = nn_ops.softplus(
math_ops.reduce_logsumexp(x, reduction_indices=-1, keep_dims=True))
fldj = (-log_normalization +
math_ops.reduce_sum(x - log_normalization,
reduction_indices=-1,
keep_dims=True))
return array_ops.squeeze(fldj, squeeze_dims=-1)
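# A minimal NumPy sketch (a hypothetical helper, not part of this API)
# checking the derivation above: for y = softmax([x, 0]) the inverse
# log-det-Jacobian is -sum(log(y)), i.e. log(1 / prod(y)).
def _softmax_centered_ildj_sketch():
  x = np.array([0.3, -1.2, 2.0])
  z = np.concatenate([x, [0.]])    # append the implicit pivot coordinate
  y = np.exp(z) / np.exp(z).sum()  # the forward mapping
  return -np.log(y).sum()          # matches _inverse_log_det_jacobian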
class SigmoidCentered(SoftmaxCentered):
"""Bijector which computes Y = g(X) = exp([X 0]) / (1 + exp(-X)).
Equivalent to: `bijector.SoftmaxCentered(event_ndims=0)`.
See `bijector.SoftmaxCentered` for more details.
"""
def __init__(self, validate_args=False, name="sigmoid_centered"):
super(SigmoidCentered, self).__init__(
validate_args=validate_args, name=name)
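# A minimal NumPy sketch (a hypothetical helper, not part of this API): for a
# scalar x the forward pass is softmax([x, 0]) == [sigmoid(x), 1 - sigmoid(x)].
def _sigmoid_centered_sketch(x=0.7):
  p = 1. / (1. + np.exp(-x))
  return np.array([p, 1. - p])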
|
madeso/prettygood
|
refs/heads/master
|
dotnet/Tagger/Song.py
|
1
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using PrettyGood.Util;
namespace Tagger
{
public class Song
{
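// Resolve reads one field from the shared IdTag instance, substituting
// "<empty>" for missing values; Set forwards a write to the same instance.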
private string Resolve(Func<IdTag, string> get)
{
string s = get(tags);
if( string.IsNullOrEmpty(s) ) return "<empty>";
else return s;
}
private void Set(Action<IdTag> set)
{
set(tags);
}
public string FilePath;
// classic ID3 v1 fields
public string Artist
{
get
{
return Resolve(x => x.Artist);
}
set
{
Set(t => t.Artist = value);
}
}
public string Title
{
get
{
return Resolve(x => x.Title);
}
set
{
Set(t => t.Title = value);
}
}
public string Album
{
get
{
return Resolve(x => x.Album);
}
set
{
Set(t => t.Album = value);
}
}
public string TrackNumber
{
get
{
return Resolve(x => x.TrackNumber);
}
set
{
Set(t => t.TrackNumber = value);
}
}
public string Genre
{
get
{
return Resolve(x => x.Genre);
}
set
{
Set(t => t.Genre = value);
}
}
public string Year
{
get
{
return Resolve(x => x.Year);
}
set
{
Set(t => t.Year = value);
}
}
public string Comments
{
get
{
return Resolve(x => x.Comments);
}
set
{
Set(t => t.Comments = value);
}
}
// extended tags (beyond the classic ID3 v1 fields)
public bool IsCover
{
get
{
return tags.IsCover;
}
set
{
tags.IsCover = value;
}
}
public bool IsRemix
{
get
{
return tags.IsRemix;
}
set
{
tags.IsRemix = value;
}
}
public string TotalTracks
{
get
{
return Resolve(x => x.TotalTracks);
}
set
{
Set(t => t.TotalTracks = value);
}
}
IdTag tags = new IdTag();
public void readTags()
{
IdTag t = new IdTag();
t.readTagsFromFile(FilePath);
set(t);
}
public void saveTags()
{
tags.saveTagsToFile(FilePath);
}
internal void set(IdTag t)
{
tags.merge(t);
}
}
}
|
ludovicoloreti/LexicApp
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/generator/make.py
|
896
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Make generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
android_ndk_version = generator_flags.get('android_ndk_version', None)
# Android NDK requires a strict link order.
if android_ndk_version:
global generator_wants_sorted_dependencies
generator_wants_sorted_dependencies = True
output_dir = params['options'].generator_output or \
params['options'].toplevel_dir
builddir_name = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
output_dir, builddir_name, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': params['options'].toplevel_dir,
'qualified_out_dir': qualified_out_dir,
}
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@"
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flags by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write(
'$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
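# For example, Target('foo/bar.cc') returns 'foo/bar.o'.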
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped even embedded in a string, else Make will
# treat it as the start of a comment.
return s.replace('#', r'\#')
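# For example, a define like FOO=a#b$c is first shell-quoted, then '$' is
# doubled for make, and finally '#' is backslash-escaped so make does not
# treat the rest of the line as a comment.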
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above
functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
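# For example, StringToMakefileVariable('out/Debug:my-target') returns
# 'out_Debug_my_target'; every character outside [a-zA-Z0-9_] becomes '_'.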
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
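# For example, with srcdir_prefix == '$(srcdir)/', Sourceify('foo/bar.c')
# returns '$(srcdir)/foo/bar.c'; absolute paths and paths already containing
# a make variable pass through unchanged.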
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput(spec)
self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = (
spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
if self.flavor == 'mac':
# libtool on OS X generates warnings for duplicate basenames in the same
# target.
_ValidateSourcesForOSX(spec, all_sources)
self.WriteSources(
configs, deps, all_sources, extra_outputs,
extra_link_deps, part_of_all,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
self.Pchify))
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
# Currently all versions have the same effect, but in the future the behavior
# could be different.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs, actions,
command="%s_%d" % (name, count))
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
_, ext = os.path.splitext(output)
if ext != '.xcassets':
# Make does not support '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
# plists can contain envvars and substitute them into the file.
self.WriteSortedXcodeEnv(
out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
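# Sketch of the rules the loop above emits for one C prefix header (the
# concrete lang_flag value comes from GetPchBuildCommands and is assumed
# here; file names are hypothetical):
#   out/pch/foo.h.gch: GYP_PCH_CFLAGS := <lang_flag> $(DEFS_$(BUILDTYPE)) ...
#   out/pch/foo.h.gch: foo.h FORCE_DO_CMD
#   <TAB>@$(call do_cmd,pch_c,1)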
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command, so
# we need lots of escaping: make collapses '$$' to '$', and the backslash
# keeps the shell from expanding it, leaving '$ORIGIN' for the loader.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
# the bundle is packaged, not merely after the bundle binary is built.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
# nested subfolders, the framework directory will usually be older than
# its dependencies. To prevent this rule from executing
# on every build (expensive, especially with postbuilds), explicitly
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as support the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
command = command,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False, command=None):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' %
(' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in
# make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the
# actual command.
# - The intermediate recipe will 'touch' the intermediate file.
# - The multi-output rule will have a do-nothing recipe.
intermediate = "%s.intermediate" % (command if command else self.target)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
self.WriteLn('\t%s' % '@:')
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' %
(intermediate, ' '.join(inputs), force_append))
actions.insert(0, '$(call do_cmd,touch)')
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
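# Sketch of what the multi-output branch above emits for, e.g., outputs
# ['a.h', 'a.cc'], inputs ['a.y'] and command 'bison' (hypothetical names):
#   a.h a.cc: bison.intermediate
#   <TAB>@:
#   .INTERMEDIATE: bison.intermediate
#   bison.intermediate: a.y FORCE_DO_CMD
#   <TAB>$(call do_cmd,touch)
#   <TAB>...the original actions...
# so the actual command runs once even though there are several outputs.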
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
# LOCAL_CFLAGS is applied to both C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out those which do not match prefix and suffix and produce
# the resulting list without prefix and suffix.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
filename = os.path.basename(filepath)
if filename.startswith(prefix) and filename.endswith(suffix):
modules.append(filename[len(prefix):-len(suffix)])
return modules
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['SHARED_LIB_PREFIX'],
default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['STATIC_LIB_PREFIX'],
generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as a literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
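# For example, for the env pair ('PRODUCT_NAME', 'Foo') and target
# 'out/Foo.app' (assumed values) this emits:
#   out/Foo.app: export PRODUCT_NAME := Foo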
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
# XXX(TooTallNate): disabling this code since we don't want this behavior...
#if (self.type == 'shared_library' and
# (self.flavor != 'mac' or self.toolset != 'target')):
# # Install all shared libs into a common directory (per toolset) for
# # convenient access with LD_LIBRARY_PATH.
# return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write(
"quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
"cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
"%(makefile_name)s: %(deps)s\n"
"\t$(call do_cmd,regen_makefile)\n\n" % {
'makefile_name': makefile_name,
'deps': ' '.join(map(Sourceify, build_files)),
'cmd': gyp.common.EncodePOSIXShellList(
[gyp_binary, '-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)})
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
arguments = ['make']
if options.toplevel_dir and options.toplevel_dir != '.':
arguments += '-C', options.toplevel_dir
arguments.append('BUILDTYPE=' + config)
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
if options.generator_output:
output_file = os.path.join(
options.depth, options.generator_output, base_path, base_name)
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(
options.toplevel_dir, options.generator_output, makefile_name)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
flock_command = 'flock'
copy_archive_arguments = '-af'
header_params = {
'default_target': default_target,
'builddir': builddir_name,
'default_configuration': default_configuration,
'flock': flock_command,
'flock_index': 1,
'link_commands': LINK_COMMANDS_LINUX,
'extra_commands': '',
'srcdir': srcdir,
'copy_archive_args': copy_archive_arguments,
}
if flavor == 'mac':
flock_command = './gyp-mac-tool flock'
header_params.update({
'flock': flock_command,
'flock_index': 2,
'link_commands': LINK_COMMANDS_MAC,
'extra_commands': SHARED_HEADER_MAC_COMMANDS,
})
elif flavor == 'android':
header_params.update({
'link_commands': LINK_COMMANDS_ANDROID,
})
elif flavor == 'solaris':
header_params.update({
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
})
elif flavor == 'openbsd':
copy_archive_arguments = '-pPRf'
header_params.update({
'copy_archive_args': copy_archive_arguments,
})
elif flavor == 'aix':
copy_archive_arguments = '-pPRf'
header_params.update({
'copy_archive_args': copy_archive_arguments,
'link_commands': LINK_COMMANDS_AIX,
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
'CC.host': GetEnvironFallback(('CC_host', 'CC'), 'gcc'),
'AR.host': GetEnvironFallback(('AR_host', 'AR'), 'ar'),
'CXX.host': GetEnvironFallback(('CXX_host', 'CXX'), 'g++'),
'LINK.host': GetEnvironFallback(('LINK_host', 'LINK'), '$(CXX.host)'),
})
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_array = data[build_file].get('make_global_settings', [])
wrappers = {}
for key, value in make_global_settings_array:
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
make_global_settings = ''
for key, value in make_global_settings_array:
if re.match('.*_wrapper', key):
continue
if value[0] != '$':
value = '$(abspath %s)' % value
wrapper = wrappers.get(key)
if wrapper:
value = '%s %s' % (wrapper, value)
del wrappers[key]
if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
make_global_settings += (
'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
# Let gyp-time envvars win over global settings.
env_key = key.replace('.', '_') # CC.host -> CC_host
if env_key in os.environ:
value = os.environ[env_key]
make_global_settings += ' %s = %s\n' % (key, value)
make_global_settings += 'endif\n'
else:
make_global_settings += '%s ?= %s\n' % (key, value)
# TODO(ukai): define cmd when only wrapper is specified in
# make_global_settings.
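# Sketch of the snippet generated for a compiler key, assuming
# make_global_settings contained ('CC', 'gcc') and no wrapper:
#   ifneq (,$(filter $(origin CC), undefined default))
#     CC = $(abspath gcc)
#   endif
# i.e. the setting only takes effect when CC was not already set on the
# make command line or in make's environment, and an envvar present at
# gyp time (CC here, CC_host for 'CC.host') wins over the gyp value.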
header_params['make_global_settings'] = make_global_settings
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER % header_params)
# Currently all versions have the same effect, but in the future the
# behavior could differ.
if android_ndk_version:
root_makefile.write(
'# Define LOCAL_PATH for build of Android applications.\n'
'LOCAL_PATH := $(call my-dir)\n'
'\n')
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
WriteRootHeaderSuffixRules(root_makefile)
# Put build-time support tools next to the root Makefile.
dest_path = os.path.dirname(makefile_path)
gyp.common.CopyTool(flavor, dest_path)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings_array == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
writer = MakefileWriter(generator_flags, flavor)
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Write out per-gyp (sub-project) Makefiles.
depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
for build_file in build_files:
# The paths in build_files were relativized above, so undo that before
# testing against the non-relativized items in target_list and before
# calculating the Makefile path.
build_file = os.path.join(depth_rel_path, build_file)
gyp_targets = [target_dicts[target]['target_name'] for target in target_list
if target.startswith(build_file) and
target in needed_targets]
# Only generate Makefiles for gyp files with targets.
if not gyp_targets:
continue
base_path, output_file = CalculateMakefilePath(build_file,
os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
os.path.dirname(output_file))
writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
builddir_name)
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
# We wrap each .mk include in an if statement so users can tell make to
# not load a file by setting NO_LOAD. The below make code says, only
# load the .mk file if the .mk filename doesn't start with a token in
# NO_LOAD.
root_makefile.write(
"ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
" $(findstring $(join ^,$(prefix)),\\\n"
" $(join ^," + include_file + ")))),)\n")
root_makefile.write(" include " + include_file + "\n")
root_makefile.write("endif\n")
root_makefile.write('\n')
if (not generator_flags.get('standalone')
and generator_flags.get('auto_regeneration', True)):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
|
llhe/tensorflow
|
refs/heads/master
|
tensorflow/contrib/rnn/python/kernel_tests/gru_ops_test.py
|
21
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Block GRU module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.rnn.python.ops import gru_ops
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class GRUBlockCellTest(test.TestCase):
def testNoneDimsWithDynamicRNN(self):
with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 4
cell_size = 5
input_size = 6
num_steps = 7
cell = gru_ops.GRUBlockCell(cell_size)
x = array_ops.placeholder(dtypes.float32, shape=(None, None, input_size))
_, output = rnn.dynamic_rnn(
cell, x, time_major=True, dtype=dtypes.float32)
sess.run(variables.global_variables_initializer())
feed = {}
feed[x] = np.random.randn(num_steps, batch_size, input_size)
sess.run(output, feed)
def testBlockGRUToGRUCellSingleStep(self):
with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 4
cell_size = 5
input_size = 6
seed = 1994
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
# Inputs
x = array_ops.zeros([batch_size, input_size])
h = array_ops.zeros([batch_size, cell_size])
# Values for the inputs.
x_value = np.random.rand(batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
# Output from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
output = rnn_cell.GRUCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([output], {x: x_value, h: h_value})
# Output from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
output = gru_ops.GRUBlockCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
block_res = sess.run([output], {x: x_value, h: h_value})
self.assertEqual(len(block_res), len(basic_res))
for block, basic in zip(block_res, basic_res):
self.assertAllClose(block, basic)
def testBlockGRUToGRUCellMultiStep(self):
with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 2
cell_size = 3
input_size = 3
time_steps = 4
# Random initializers.
seed = 1994
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
h = array_ops.zeros([batch_size, cell_size])
# Values for the inputs.
x_values = np.random.rand(time_steps, batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
# Output from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
feeds = {concat_x: x_values, h: h_value}
sess.run([variables.global_variables_initializer()])
block_res = sess.run([outputs_dynamic, state_dynamic], feeds)
# Output from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
cell = rnn_cell.GRUCell(cell_size)
outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
feeds = {concat_x: x_values, h: h_value}
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([outputs_dynamic, state_dynamic], feeds)
# Check the lengths of the outputs_dynamic and states.
self.assertEqual(len(block_res), len(basic_res))
self.assertEqual(len(block_res[0]), len(basic_res[0]))
self.assertEqual(len(block_res[1]), len(basic_res[1]))
# Check the outputs_dynamic values.
for block_output, basic_output in zip(block_res[0], basic_res[0]):
self.assertAllClose(block_output, basic_output)
# Check the state_dynamic value.
self.assertAllClose(block_res[1], basic_res[1])
def testDerivativeOfBlockGRUToGRUCellSingleStep(self):
with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 2
cell_size = 3
input_size = 4
seed = 1994
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
x = array_ops.zeros([batch_size, input_size])
h = array_ops.zeros([batch_size, cell_size])
# Values for the inputs.
x_value = np.random.rand(batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
# Gradients from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
output = gru_ops.GRUBlockCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
all_variables = variables.global_variables()[0:4]
[w_ru, b_ru, w_c, b_c] = all_variables
d_new_h_wrt_x = gradients_impl.gradients([output], x)
d_new_h_wrt_h = gradients_impl.gradients([output], h)
d_new_h_wrt_w_ru = gradients_impl.gradients([output], w_ru)
d_new_h_wrt_w_c = gradients_impl.gradients([output], w_c)
d_new_h_wrt_b_ru = gradients_impl.gradients([output], b_ru)
d_new_h_wrt_b_c = gradients_impl.gradients([output], b_c)
d_block_res = sess.run([
d_new_h_wrt_x, d_new_h_wrt_h, d_new_h_wrt_w_ru, d_new_h_wrt_w_c,
d_new_h_wrt_b_ru, d_new_h_wrt_b_c
], {x: x_value,
h: h_value})
# Gradients from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
output = rnn_cell.GRUCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
all_variables = variables.global_variables()[4:8]
[w_ru, b_ru, w_c, b_c] = all_variables
d_new_h_wrt_x = gradients_impl.gradients([output], x)
d_new_h_wrt_h = gradients_impl.gradients([output], h)
d_new_h_wrt_w_ru = gradients_impl.gradients([output], w_ru)
d_new_h_wrt_w_c = gradients_impl.gradients([output], w_c)
d_new_h_wrt_b_ru = gradients_impl.gradients([output], b_ru)
d_new_h_wrt_b_c = gradients_impl.gradients([output], b_c)
d_basic_res = sess.run([
d_new_h_wrt_x, d_new_h_wrt_h, d_new_h_wrt_w_ru, d_new_h_wrt_w_c,
d_new_h_wrt_b_ru, d_new_h_wrt_b_c
], {x: x_value,
h: h_value})
# Check lengths of derivative results.
self.assertEqual(len(d_block_res), len(d_basic_res))
# Check the value of every derivative result.
for block, basic in zip(d_block_res, d_basic_res):
self.assertAllClose(block, basic)
def testDerivativeOfBlockGRUToGRUCellMultiSteps(self):
batch_size = 2
cell_size = 3
input_size = 4
time_steps = 2
with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
# Random initializers.
seed = 1994
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
h = array_ops.zeros([batch_size, cell_size])
# Values for the inputs.
x_values = np.random.rand(time_steps, batch_size, input_size)
h_value = np.random.rand(batch_size, cell_size)
feeds = {concat_x: x_values, h: h_value}
# Gradients from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
grad_output_wrt_x = gradients_impl.gradients([outputs_dynamic[0]],
concat_x)
grad_output_wrt_h = gradients_impl.gradients([outputs_dynamic[0]], h)
sess.run([variables.global_variables_initializer()])
block_grad_res_x, block_grad_res_h = sess.run(
[grad_output_wrt_x, grad_output_wrt_h], feeds)
# Gradients from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
cell = rnn_cell.GRUCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
grad_output_wrt_x = gradients_impl.gradients([outputs_dynamic[0]],
concat_x)
grad_output_wrt_h = gradients_impl.gradients([outputs_dynamic[0]], h)
sess.run([variables.global_variables_initializer()])
basic_grad_res_x, basic_grad_res_h = sess.run(
[grad_output_wrt_x, grad_output_wrt_h], feeds)
# Check derivative values of the outputs wrt x.
self.assertEqual(len(block_grad_res_x), len(basic_grad_res_x))
for block, basic in zip(block_grad_res_x, basic_grad_res_x):
self.assertAllClose(block, basic)
# Check derivative values of the outputs wrt h.
self.assertEqual(len(block_grad_res_h), len(basic_grad_res_h))
for block, basic in zip(block_grad_res_h, basic_grad_res_h):
self.assertAllClose(block, basic)
def testGradient(self):
with self.test_session(use_gpu=True, graph=ops.Graph()) as sess:
batch_size = 1
cell_size = 3
input_size = 2
# Inputs
x = array_ops.zeros([batch_size, input_size])
h = array_ops.zeros([batch_size, cell_size])
output = gru_ops.GRUBlockCell(cell_size)(x, h)
sess.run([variables.global_variables_initializer()])
all_variables = variables.global_variables()
[w_ru, b_ru, w_c, b_c] = all_variables[:4]
error_x = gradient_checker.compute_gradient_error(
x, (batch_size, input_size), output[0], (batch_size, cell_size))
error_h = gradient_checker.compute_gradient_error(h,
(batch_size, cell_size),
output[0],
(batch_size, cell_size))
error_w_ru = gradient_checker.compute_gradient_error(
w_ru, (input_size + cell_size, 2 * cell_size), output[0],
(batch_size, cell_size))
error_w_c = gradient_checker.compute_gradient_error(
w_c, (input_size + cell_size, cell_size), output[0],
(batch_size, cell_size))
error_b_ru = gradient_checker.compute_gradient_error(
b_ru, (2 * cell_size,), output[0], (batch_size, cell_size))
error_b_c = gradient_checker.compute_gradient_error(
b_c, (cell_size,), output[0], (batch_size, cell_size))
eps = 1e-4
self.assertLess(error_x, eps)
self.assertLess(error_h, eps)
self.assertLess(error_w_ru, eps)
self.assertLess(error_w_c, eps)
self.assertLess(error_b_ru, eps)
self.assertLess(error_b_c, eps)
#### Benchmarking GRUBlockCell vs GRUCell.
def time_taken_by_op(op, sess, num_runs=50):
"""Time taken by the Op."""
for _ in range(2):
sess.run([op])
start_time = time.time()
for _ in range(num_runs):
sess.run([op])
end_time = time.time()
time_taken = end_time - start_time
return time_taken
def training_gru_block_vs_gru_cell(batch_size,
cell_size,
input_size,
time_steps,
use_gpu=False,
iters=30):
"""Benchmark training speed between GRUBlockCell vs GRUCell."""
ops.reset_default_graph()
with session.Session(graph=ops.Graph()) as sess:
# Specify the device to be used.
with ops.device("/cpu:0" if not use_gpu else "/gpu:0"):
# Random initializers.
seed = 1994
initializer = init_ops.random_uniform_initializer(-1, 1, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = vs.get_variable("concat_x",
[time_steps, batch_size, input_size])
h = vs.get_variable("h", [batch_size, cell_size])
y = vs.get_variable("y", [time_steps, batch_size, cell_size])
# Output from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
cell = rnn_cell.GRUCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
sess.run([variables.global_variables_initializer()])
cost = math_ops.reduce_mean(math_ops.square(outputs_dynamic - y))
learning_rate = 0.01
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate).minimize(cost)
# time for a training step.
basic_time_training = time_taken_by_op(optimizer, sess, iters)
# Output from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
sess.run([variables.global_variables_initializer()])
cost = math_ops.reduce_mean(math_ops.square(outputs_dynamic - y))
learning_rate = 0.01
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate).minimize(cost)
# time for a training step.
block_time_training = time_taken_by_op(optimizer, sess, iters)
performance_training = (
basic_time_training - block_time_training) * 100 / basic_time_training
print(",".join([
str(batch_size), str(cell_size), str(input_size), str(time_steps), str(
use_gpu), str(basic_time_training), str(block_time_training), str(
performance_training)
]))
return basic_time_training, block_time_training
def inference_gru_block_vs_gru_cell(batch_size,
cell_size,
input_size,
time_steps,
use_gpu=False,
iters=30):
"""Benchmark inference speed between GRUBlockCell vs GRUCell."""
ops.reset_default_graph()
with session.Session(graph=ops.Graph()) as sess:
with ops.device("/cpu:0" if not use_gpu else "/gpu:0"):
# Random initializers.
seed = 1994
initializer = init_ops.random_uniform_initializer(-1, 1, seed=seed)
np.random.seed(seed)
# Inputs
concat_x = vs.get_variable("concat_x",
[time_steps, batch_size, input_size])
h = vs.get_variable("h", [batch_size, cell_size])
# Output from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
cell = rnn_cell.GRUCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
sess.run([variables.global_variables_initializer()])
basic_time_inference = time_taken_by_op(outputs_dynamic, sess, iters)
# Output from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
cell = gru_ops.GRUBlockCell(cell_size)
outputs_dynamic, _ = rnn.dynamic_rnn(
cell,
inputs=concat_x,
initial_state=h,
time_major=True,
dtype=dtypes.float32)
sess.run([variables.global_variables_initializer()])
block_time_inference = time_taken_by_op(outputs_dynamic, sess, iters)
performance_inference = (basic_time_inference - block_time_inference
) * 100 / basic_time_inference
print(",".join([
str(batch_size), str(cell_size), str(input_size), str(time_steps), str(
use_gpu), str(basic_time_inference), str(block_time_inference), str(
performance_inference)
]))
return basic_time_inference, block_time_inference
def single_bprop_step_gru_block_vs_gru_cell(batch_size,
cell_size,
input_size,
use_gpu=False,
iters=30):
"""Benchmark single bprop step speed between GRUBlockCell vs GRUCell."""
ops.reset_default_graph()
with session.Session(graph=ops.Graph()) as sess:
with ops.device("/cpu:0" if not use_gpu else "/gpu:0"):
initializer = init_ops.random_uniform_initializer(-1, 1, seed=1989)
# Inputs
x = vs.get_variable("x", [batch_size, input_size])
h = vs.get_variable("h", [batch_size, cell_size])
# Output from the basic GRU cell implementation.
with vs.variable_scope("basic", initializer=initializer):
output = rnn_cell.GRUCell(cell_size)(array_ops.identity(x),
array_ops.identity(h))
sess.run([variables.global_variables_initializer()])
grad_output_wrt_input = gradients_impl.gradients([output], h)
basic_time_bprop = time_taken_by_op(grad_output_wrt_input, sess, iters)
# Output from the block GRU cell implementation.
with vs.variable_scope("block", initializer=initializer):
output = gru_ops.GRUBlockCell(cell_size)(array_ops.identity(x),
array_ops.identity(h))
sess.run([variables.global_variables_initializer()])
grad_output_wrt_input = gradients_impl.gradients([output], h)
block_time_bprop = time_taken_by_op(grad_output_wrt_input, sess, iters)
performance_inference = (
basic_time_bprop - block_time_bprop) * 100 / basic_time_bprop
print(",".join([
str(batch_size), str(cell_size), str(input_size), str(use_gpu), str(
basic_time_bprop), str(block_time_bprop), str(performance_inference)
]))
return basic_time_bprop, block_time_bprop
class BenchmarkGRUBlock(test.Benchmark):
def benchmarkTrainingBlockGRUVsGRUCell(self):
print("Comparison GRUBlockCell vs GRUCell")
print("--------------------------------------------------------------")
print("Training speed GRUBlockCell vs GRUCell")
print("batch_size, cell_size, input_size, time_steps, GPU, "
"basic_time_training, block_time_training, performance_training[%]")
iters = 10
for use_gpu in [True, False]:
for batch_size in [1, 32, 128]:
for cell_size in [128, 512]:
for input_size in [128, 512]:
for time_steps in [50]:
basic_time, block_time = training_gru_block_vs_gru_cell(
batch_size, cell_size, input_size, time_steps, use_gpu, iters)
self.report_benchmark(
name="GRUCell_training_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" %
(batch_size, cell_size, input_size, time_steps, use_gpu),
iters=iters,
wall_time=basic_time)
self.report_benchmark(
name="GRUBlockCell_training_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" %
(batch_size, cell_size, input_size, time_steps, use_gpu),
iters=iters,
wall_time=block_time)
def benchmarkInferenceBlockGRUVsGRUCell(self):
print("--------------------------------------------------------------")
print("Inference speed GRUBlockCell vs GRUCell")
print(
"batch_size, cell_size, input_size, time_steps, GPU, "
"basic_time_inference, block_time_inference, performance_inference[%]")
iters = 10
for use_gpu in [True, False]:
for batch_size in [1, 32, 128]:
for cell_size in [128, 512]:
for input_size in [128, 512]:
for time_steps in [50]:
basic_time, block_time = inference_gru_block_vs_gru_cell(
batch_size, cell_size, input_size, time_steps, use_gpu, iters)
self.report_benchmark(
name="GRUCell_inference_time_BS%i_CS%i_IS%i_TS%i_gpu_%s" %
(batch_size, cell_size, input_size, time_steps, use_gpu),
iters=iters,
wall_time=basic_time)
self.report_benchmark(
name="GRUBlockCell_inference_time_BS%i_CS%i_IS%i_TS%i_gpu_%s"
% (batch_size, cell_size, input_size, time_steps, use_gpu),
iters=iters,
wall_time=block_time)
def benchmarkSingleBpropStepBlockGRUVsGRUCell(self):
print("--------------------------------------------------------------")
print("Single bprop step speed GRUBlockCell vs GRUCell")
print("batch_size, cell_size, input_size, GPU, basic_time, "
"block_time, performance_inference[%]")
iters = 10
for use_gpu in [True, False]:
for batch_size in [1, 32, 128]:
for cell_size in [128, 512]:
for input_size in [128, 512]:
basic_time, block_time = single_bprop_step_gru_block_vs_gru_cell(
batch_size, cell_size, input_size, use_gpu, iters)
self.report_benchmark(
name="GRUCell_Bprop_single_step_time_BS%i_CS%i_IS%i_gpu_%s" %
(batch_size, cell_size, input_size, use_gpu),
iters=iters,
wall_time=basic_time)
self.report_benchmark(
name="GRUBlockCell_Bprop_single_step_time_BS%i_CS%i_IS%i_gpu_%s"
% (batch_size, cell_size, input_size, use_gpu),
iters=iters,
wall_time=block_time)
print("--------------------------------------------------------------")
if __name__ == "__main__":
test.main()
|
chromium/chromium
|
refs/heads/master
|
third_party/android_deps/libs/org_apache_maven_wagon_wagon_provider_api/3pp/fetch.py
|
6
|
#!/usr/bin/env python
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
from __future__ import print_function
import argparse
import json
import os
_FILE_URL = 'https://repo.maven.apache.org/maven2/org/apache/maven/wagon/wagon-provider-api/1.0-beta-6/wagon-provider-api-1.0-beta-6.jar'
_FILE_NAME = 'wagon-provider-api-1.0-beta-6.jar'
_FILE_VERSION = '1.0-beta-6'
def do_latest():
print(_FILE_VERSION)
def get_download_url(version):
if _FILE_URL.endswith('.jar'):
ext = '.jar'
elif _FILE_URL.endswith('.aar'):
ext = '.aar'
else:
raise Exception('Unsupported extension for %s' % _FILE_URL)
partial_manifest = {
'url': [_FILE_URL],
'name': [_FILE_NAME],
'ext': ext,
}
print(json.dumps(partial_manifest))
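# Illustratively, the partial manifest printed above for this URL would be:
# {"url": ["https://repo.maven.apache.org/maven2/org/apache/maven/wagon/wagon-provider-api/1.0-beta-6/wagon-provider-api-1.0-beta-6.jar"],
#  "name": ["wagon-provider-api-1.0-beta-6.jar"], "ext": ".jar"}
# (json.dumps emits this on a single line; it is wrapped here for readability.)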
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser("latest")
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser("get_url")
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
|
frederick-masterton/django
|
refs/heads/master
|
django/contrib/webdesign/tests.py
|
42
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django.contrib.webdesign.lorem_ipsum import paragraphs, words
from django.template import loader, Context
class WebdesignTest(unittest.TestCase):
def test_words(self):
self.assertEqual(words(7), 'lorem ipsum dolor sit amet consectetur adipisicing')
def test_paragraphs(self):
self.assertEqual(paragraphs(1),
['Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'])
def test_lorem_tag(self):
t = loader.get_template_from_string("{% load webdesign %}{% lorem 3 w %}")
self.assertEqual(t.render(Context({})),
'lorem ipsum dolor')
|
onceuponatimeforever/oh-mainline
|
refs/heads/master
|
vendor/packages/docutils/docutils/writers/manpage.py
|
113
|
# -*- coding: utf-8 -*-
# $Id: manpage.py 7628 2013-03-09 10:19:35Z grubert $
# Author: Engelbert Gruber <grubert@users.sourceforge.net>
# Copyright: This module is put into the public domain.
"""
Simple man page writer for reStructuredText.
Man pages (short for "manual pages") contain system documentation on unix-like
systems. The pages are grouped in numbered sections:
1 executable programs and shell commands
2 system calls
3 library functions
4 special files
5 file formats
6 games
7 miscellaneous
8 system administration
Man pages are written in *troff*, a text file formatting system.
See http://www.tldp.org/HOWTO/Man-Page for a start.
Man pages have no subsections, only parts.
Standard parts
NAME ,
SYNOPSIS ,
DESCRIPTION ,
OPTIONS ,
FILES ,
SEE ALSO ,
BUGS ,
and
AUTHOR .
A unix-like system keeps an index of the DESCRIPTIONs, which is accessible
via the commands whatis or apropos.
"""
__docformat__ = 'reStructuredText'
import re
import docutils
from docutils import nodes, writers, languages
try:
import roman
except ImportError:
import docutils.utils.roman as roman
FIELD_LIST_INDENT = 7
DEFINITION_LIST_INDENT = 7
OPTION_LIST_INDENT = 7
BLOCKQUOTE_INDENT = 3.5
LITERAL_BLOCK_INDENT = 3.5
# Define two macros so man/roff can calculate the
# indent/unindent margins by itself
MACRO_DEF = (r""".
.nr rst2man-indent-level 0
.
.de1 rstReportMargin
\\$1 \\n[an-margin]
level \\n[rst2man-indent-level]
level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
-
\\n[rst2man-indent0]
\\n[rst2man-indent1]
\\n[rst2man-indent2]
..
.de1 INDENT
.\" .rstReportMargin pre:
. RS \\$1
. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
. nr rst2man-indent-level +1
.\" .rstReportMargin post:
..
.de UNINDENT
. RE
.\" indent \\n[an-margin]
.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
.nr rst2man-indent-level -1
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
""")
class Writer(writers.Writer):
supported = ('manpage',)
"""Formats this writer supports."""
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = Translator
def translate(self):
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = visitor.astext()
class Table(object):
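"""Accumulate rows of table cells and render them as tbl markup (.TS/.TE)."""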
def __init__(self):
self._rows = []
self._options = ['center']
self._tab_char = '\t'
self._coldefs = []
def new_row(self):
self._rows.append([])
def append_separator(self, separator):
"""Append the separator for table head."""
self._rows.append([separator])
def append_cell(self, cell_lines):
"""cell_lines is an array of lines"""
start = 0
if len(cell_lines) > 0 and cell_lines[0] == '.sp\n':
start = 1
self._rows[-1].append(cell_lines[start:])
if len(self._coldefs) < len(self._rows[-1]):
self._coldefs.append('l')
def _minimize_cell(self, cell_lines):
"""Remove leading and trailing blank and ``.sp`` lines"""
while (cell_lines and cell_lines[0] in ('\n', '.sp\n')):
del cell_lines[0]
while (cell_lines and cell_lines[-1] in ('\n', '.sp\n')):
del cell_lines[-1]
def as_list(self):
text = ['.TS\n']
text.append(' '.join(self._options) + ';\n')
text.append('|%s|.\n' % ('|'.join(self._coldefs)))
for row in self._rows:
# row = array of cells. cell = array of lines.
text.append('_\n') # line above
text.append('T{\n')
for i in range(len(row)):
cell = row[i]
self._minimize_cell(cell)
text.extend(cell)
if not text[-1].endswith('\n'):
text[-1] += '\n'
if i < len(row)-1:
text.append('T}'+self._tab_char+'T{\n')
else:
text.append('T}\n')
text.append('_\n')
text.append('.TE\n')
return text
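# Illustratively, a single row with cells ["a\n"] and ["b\n"] comes out of
# as_list() as the following lines (<TAB> stands for the tab character that
# separates adjacent cells):
# .TS
# center;
# |l|l|.
# _
# T{
# a
# T}<TAB>T{
# b
# T}
# _
# .TE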
class Translator(nodes.NodeVisitor):
""""""
words_and_spaces = re.compile(r'\S+| +|\n')
possibly_a_roff_command = re.compile(r'\.\w')
document_start = """Man page generated from reStructuredText."""
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
self.settings = settings = document.settings
lcode = settings.language_code
self.language = languages.get_language(lcode, document.reporter)
self.head = []
self.body = []
self.foot = []
self.section_level = 0
self.context = []
self.topic_class = ''
self.colspecs = []
self.compact_p = 1
self.compact_simple = None
# the list style "*" bullet or "#" numbered
self._list_char = []
# writing the header .TH and .SH NAME is postponed until after
# the docinfo.
self._docinfo = {
"title" : "", "title_upper": "",
"subtitle" : "",
"manual_section" : "", "manual_group" : "",
"author" : [],
"date" : "",
"copyright" : "",
"version" : "",
}
self._docinfo_keys = [] # a list to keep the sequence as in source.
self._docinfo_names = {} # to get name from text not normalized.
self._in_docinfo = None
self._active_table = None
self._in_literal = False
self.header_written = 0
self._line_block = 0
self.authors = []
self.section_level = 0
self._indent = [0]
# central definition of simple processing rules
# what to output on : visit, depart
# Do not use paragraph requests ``.PP`` because these set indentation.
# use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
#
# Fonts are put on a stack, the top one is used.
# ``.ft P`` or ``\\fP`` pop from stack.
# ``B`` bold, ``I`` italic, ``R`` roman should be available.
# Hopefully ``C`` courier too.
self.defs = {
'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
'definition_list_item' : ('.TP', ''),
'field_name' : ('.TP\n.B ', '\n'),
'literal' : ('\\fB', '\\fP'),
'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),
'option_list_item' : ('.TP\n', ''),
'reference' : (r'\fI\%', r'\fP'),
'emphasis': ('\\fI', '\\fP'),
'strong' : ('\\fB', '\\fP'),
'term' : ('\n.B ', '\n'),
'title_reference' : ('\\fI', '\\fP'),
'topic-title' : ('.SS ',),
'sidebar-title' : ('.SS ',),
'problematic' : ('\n.nf\n', '\n.fi\n'),
}
# NOTE do not specify the newline before a dot-command, but ensure
# it is there.
def comment_begin(self, text):
"""Return commented version of the passed text WITHOUT end of
line/comment."""
prefix = '.\\" '
out_text = ''.join(
[(prefix + in_line + '\n')
for in_line in text.split('\n')])
return out_text
def comment(self, text):
"""Return commented version of the passed text."""
return self.comment_begin(text)+'.\n'
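# For example, self.comment('two\nlines') returns the three lines:
# .\" two
# .\" lines
# .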
def ensure_eol(self):
"""Ensure the last line in body is terminated by new line."""
if len(self.body) > 0 and self.body[-1][-1] != '\n':
self.body.append('\n')
def astext(self):
"""Return the final formatted document as a string."""
if not self.header_written:
# ensure we get a ".TH" as viewers require it.
self.append_header()
# filter body
for i in xrange(len(self.body)-1, 0, -1):
# remove superfluous vertical gaps.
if self.body[i] == '.sp\n':
if self.body[i - 1][:4] in ('.BI ','.IP '):
self.body[i] = '.\n'
elif (self.body[i - 1][:3] == '.B ' and
self.body[i - 2][:4] == '.TP\n'):
self.body[i] = '.\n'
elif (self.body[i - 1] == '\n' and
not self.possibly_a_roff_command.match(self.body[i - 2]) and
(self.body[i - 3][:7] == '.TP\n.B '
or self.body[i - 3][:4] == '\n.B ')
):
self.body[i] = '.\n'
return ''.join(self.head + self.body + self.foot)
def deunicode(self, text):
text = text.replace(u'\xa0', '\\ ')
text = text.replace(u'\u2020', '\\(dg')
return text
def visit_Text(self, node):
text = node.astext()
text = text.replace('\\','\\e')
replace_pairs = [
(u'-', ur'\-'),
(u'\'', ur'\(aq'),
(u'´', ur'\''),
(u'`', ur'\(ga'),
]
for (in_char, out_markup) in replace_pairs:
text = text.replace(in_char, out_markup)
# unicode
text = self.deunicode(text)
# prevent interpretation of "." at line start
if text.startswith('.'):
text = '\\&' + text
if self._in_literal:
text = text.replace('\n.', '\n\\&.')
self.body.append(text)
def depart_Text(self, node):
pass
def list_start(self, node):
class enum_char(object):
enum_style = {
'bullet' : '\\(bu',
'emdash' : '\\(em',
}
def __init__(self, style):
self._style = style
if node.has_key('start'):
self._cnt = node['start'] - 1
else:
self._cnt = 0
self._indent = 2
if style == 'arabic':
# indentation depends on the number of children
# and the start value.
self._indent = len(str(len(node.children)))
self._indent += len(str(self._cnt)) + 1
elif style == 'loweralpha':
self._cnt += ord('a') - 1
self._indent = 3
elif style == 'upperalpha':
self._cnt += ord('A') - 1
self._indent = 3
elif style.endswith('roman'):
self._indent = 5
def next(self):
if self._style == 'bullet':
return self.enum_style[self._style]
elif self._style == 'emdash':
return self.enum_style[self._style]
self._cnt += 1
# TODO add prefix postfix
if self._style == 'arabic':
return "%d." % self._cnt
elif self._style in ('loweralpha', 'upperalpha'):
return "%c." % self._cnt
elif self._style.endswith('roman'):
res = roman.toRoman(self._cnt) + '.'
if self._style.startswith('upper'):
return res.upper()
return res.lower()
else:
return "%d." % self._cnt
def get_width(self):
return self._indent
def __repr__(self):
return 'enum_style-%s' % list(self._style)
if node.has_key('enumtype'):
self._list_char.append(enum_char(node['enumtype']))
else:
self._list_char.append(enum_char('bullet'))
if len(self._list_char) > 1:
# indent nested lists
self.indent(self._list_char[-2].get_width())
else:
self.indent(self._list_char[-1].get_width())
def list_end(self):
self.dedent()
self._list_char.pop()
def header(self):
tmpl = (".TH %(title_upper)s %(manual_section)s"
" \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
".SH NAME\n"
"%(title)s \- %(subtitle)s\n")
return tmpl % self._docinfo
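# With a populated docinfo, header() returns something like (all values
# illustrative):
# .TH FOO 1 "2013-03-09" "1.0" "User Commands"
# .SH NAME
# foo \- do something useful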
def append_header(self):
"""append header with .TH and .SH NAME"""
# NOTE before everything
# .TH title_upper section date source manual
if self.header_written:
return
self.head.append(self.header())
self.head.append(MACRO_DEF)
self.header_written = 1
def visit_address(self, node):
self.visit_docinfo_item(node, 'address')
def depart_address(self, node):
pass
def visit_admonition(self, node, name=None):
#
# Make admonitions a simple block quote
# with a strong heading
#
# Using .IP/.RE doesn't preserve indentation
# when admonitions contain bullets, literal,
# and/or block quotes.
#
if name:
# .. admonition:: has no name
self.body.append('.sp\n')
name = '%s%s:%s\n' % (
self.defs['strong'][0],
self.language.labels.get(name, name).upper(),
self.defs['strong'][1],
)
self.body.append(name)
self.visit_block_quote(node)
def depart_admonition(self, node):
self.depart_block_quote(node)
def visit_attention(self, node):
self.visit_admonition(node, 'attention')
depart_attention = depart_admonition
def visit_docinfo_item(self, node, name):
if name == 'author':
self._docinfo[name].append(node.astext())
else:
self._docinfo[name] = node.astext()
self._docinfo_keys.append(name)
raise nodes.SkipNode
def depart_docinfo_item(self, node):
pass
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
depart_author = depart_docinfo_item
def visit_authors(self, node):
# visit_author is called for each author anyway.
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
# BUG/HACK: indent always uses the _last_ indentation,
# thus we need two of them.
self.indent(BLOCKQUOTE_INDENT)
self.indent(0)
def depart_block_quote(self, node):
self.dedent()
self.dedent()
def visit_bullet_list(self, node):
self.list_start(node)
def depart_bullet_list(self, node):
self.list_end()
def visit_caption(self, node):
pass
def depart_caption(self, node):
pass
def visit_caution(self, node):
self.visit_admonition(node, 'caution')
depart_caution = depart_admonition
def visit_citation(self, node):
num, text = node.astext().split(None, 1)
num = num.strip()
self.body.append('.IP [%s] 5\n' % num)
def depart_citation(self, node):
pass
def visit_citation_reference(self, node):
self.body.append('['+node.astext()+']')
raise nodes.SkipNode
def visit_classifier(self, node):
pass
def depart_classifier(self, node):
pass
def visit_colspec(self, node):
self.colspecs.append(node)
def depart_colspec(self, node):
pass
def write_colspecs(self):
self.body.append("%s.\n" % ('L '*len(self.colspecs)))
def visit_comment(self, node,
sub=re.compile('-(?=-)').sub):
self.body.append(self.comment(node.astext()))
raise nodes.SkipNode
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
depart_contact = depart_docinfo_item
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def visit_danger(self, node):
self.visit_admonition(node, 'danger')
depart_danger = depart_admonition
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
pass
def depart_definition(self, node):
pass
def visit_definition_list(self, node):
self.indent(DEFINITION_LIST_INDENT)
def depart_definition_list(self, node):
self.dedent()
def visit_definition_list_item(self, node):
self.body.append(self.defs['definition_list_item'][0])
def depart_definition_list_item(self, node):
self.body.append(self.defs['definition_list_item'][1])
def visit_description(self, node):
pass
def depart_description(self, node):
pass
def visit_docinfo(self, node):
self._in_docinfo = 1
def depart_docinfo(self, node):
self._in_docinfo = None
# NOTE nothing should be written before this
self.append_header()
def visit_doctest_block(self, node):
self.body.append(self.defs['literal_block'][0])
self._in_literal = True
def depart_doctest_block(self, node):
self._in_literal = False
self.body.append(self.defs['literal_block'][1])
def visit_document(self, node):
# no blank line between comment and header.
self.head.append(self.comment(self.document_start).rstrip()+'\n')
# writing the header is postponed
self.header_written = 0
def depart_document(self, node):
if self._docinfo['author']:
self.body.append('.SH AUTHOR\n%s\n'
% ', '.join(self._docinfo['author']))
skip = ('author', 'copyright', 'date',
'manual_group', 'manual_section',
'subtitle',
'title', 'title_upper', 'version')
for name in self._docinfo_keys:
if name == 'address':
self.body.append("\n%s:\n%s%s.nf\n%s\n.fi\n%s%s" % (
self.language.labels.get(name, name),
self.defs['indent'][0] % 0,
self.defs['indent'][0] % BLOCKQUOTE_INDENT,
self._docinfo[name],
self.defs['indent'][1],
self.defs['indent'][1]))
elif not name in skip:
if name in self._docinfo_names:
label = self._docinfo_names[name]
else:
label = self.language.labels.get(name, name)
self.body.append("\n%s: %s\n" % (label, self._docinfo[name]))
if self._docinfo['copyright']:
self.body.append('.SH COPYRIGHT\n%s\n'
% self._docinfo['copyright'])
self.body.append(self.comment(
'Generated by docutils manpage writer.'))
def visit_emphasis(self, node):
self.body.append(self.defs['emphasis'][0])
def depart_emphasis(self, node):
self.body.append(self.defs['emphasis'][1])
def visit_entry(self, node):
# a cell in a table row
if 'morerows' in node:
self.document.reporter.warning('"table row spanning" not supported',
base_node=node)
if 'morecols' in node:
self.document.reporter.warning(
'"table cell spanning" not supported', base_node=node)
self.context.append(len(self.body))
def depart_entry(self, node):
start = self.context.pop()
self._active_table.append_cell(self.body[start:])
del self.body[start:]
def visit_enumerated_list(self, node):
self.list_start(node)
def depart_enumerated_list(self, node):
self.list_end()
def visit_error(self, node):
self.visit_admonition(node, 'error')
depart_error = depart_admonition
def visit_field(self, node):
pass
def depart_field(self, node):
pass
def visit_field_body(self, node):
if self._in_docinfo:
name_normalized = self._field_name.lower().replace(" ","_")
self._docinfo_names[name_normalized] = self._field_name
self.visit_docinfo_item(node, name_normalized)
raise nodes.SkipNode
def depart_field_body(self, node):
pass
def visit_field_list(self, node):
self.indent(FIELD_LIST_INDENT)
def depart_field_list(self, node):
self.dedent()
def visit_field_name(self, node):
if self._in_docinfo:
self._field_name = node.astext()
raise nodes.SkipNode
else:
self.body.append(self.defs['field_name'][0])
def depart_field_name(self, node):
self.body.append(self.defs['field_name'][1])
def visit_figure(self, node):
self.indent(2.5)
self.indent(0)
def depart_figure(self, node):
self.dedent()
self.dedent()
def visit_footer(self, node):
self.document.reporter.warning('"footer" not supported',
base_node=node)
def depart_footer(self, node):
pass
def visit_footnote(self, node):
num, text = node.astext().split(None, 1)
num = num.strip()
self.body.append('.IP [%s] 5\n' % self.deunicode(num))
def depart_footnote(self, node):
pass
def footnote_backrefs(self, node):
self.document.reporter.warning('"footnote_backrefs" not supported',
base_node=node)
def visit_footnote_reference(self, node):
self.body.append('['+self.deunicode(node.astext())+']')
raise nodes.SkipNode
def depart_footnote_reference(self, node):
pass
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
raise NotImplementedError, node.astext()
def depart_header(self, node):
pass
def visit_hint(self, node):
self.visit_admonition(node, 'hint')
depart_hint = depart_admonition
def visit_subscript(self, node):
self.body.append('\\s-2\\d')
def depart_subscript(self, node):
self.body.append('\\u\\s0')
def visit_superscript(self, node):
self.body.append('\\s-2\\u')
def depart_superscript(self, node):
self.body.append('\\d\\s0')
def visit_attribution(self, node):
self.body.append('\\(em ')
def depart_attribution(self, node):
self.body.append('\n')
def visit_image(self, node):
self.document.reporter.warning('"image" not supported',
base_node=node)
text = []
if 'alt' in node.attributes:
text.append(node.attributes['alt'])
if 'uri' in node.attributes:
text.append(node.attributes['uri'])
self.body.append('[image: %s]\n' % ('/'.join(text)))
raise nodes.SkipNode
def visit_important(self, node):
self.visit_admonition(node, 'important')
depart_important = depart_admonition
def visit_label(self, node):
# footnote and citation
if (isinstance(node.parent, nodes.footnote)
or isinstance(node.parent, nodes.citation)):
raise nodes.SkipNode
self.document.reporter.warning('unsupported "label"',
base_node=node)
self.body.append('[')
def depart_label(self, node):
self.body.append(']\n')
def visit_legend(self, node):
pass
def depart_legend(self, node):
pass
# TODO: should we use .INDENT/.UNINDENT here?
def visit_line_block(self, node):
self._line_block += 1
if self._line_block == 1:
# TODO: separate inline blocks from previous paragraphs
# see http://hg.intevation.org/mercurial/crew/rev/9c142ed9c405
# self.body.append('.sp\n')
# but it does not work for me.
self.body.append('.nf\n')
else:
self.body.append('.in +2\n')
def depart_line_block(self, node):
self._line_block -= 1
if self._line_block == 0:
self.body.append('.fi\n')
self.body.append('.sp\n')
else:
self.body.append('.in -2\n')
def visit_line(self, node):
pass
def depart_line(self, node):
self.body.append('\n')
def visit_list_item(self, node):
# man 7 man argues to use ".IP" instead of ".TP"
self.body.append('.IP %s %d\n' % (
self._list_char[-1].next(),
self._list_char[-1].get_width(),))
def depart_list_item(self, node):
pass
def visit_literal(self, node):
self.body.append(self.defs['literal'][0])
def depart_literal(self, node):
self.body.append(self.defs['literal'][1])
def visit_literal_block(self, node):
# BUG/HACK: indent always uses the _last_ indentation,
# thus we need two of them.
self.indent(LITERAL_BLOCK_INDENT)
self.indent(0)
self.body.append(self.defs['literal_block'][0])
self._in_literal = True
def depart_literal_block(self, node):
self._in_literal = False
self.body.append(self.defs['literal_block'][1])
self.dedent()
self.dedent()
def visit_math(self, node):
self.document.reporter.warning('"math" role not supported',
base_node=node)
self.visit_literal(node)
def depart_math(self, node):
self.depart_literal(node)
def visit_math_block(self, node):
self.document.reporter.warning('"math" directive not supported',
base_node=node)
self.visit_literal_block(node)
def depart_math_block(self, node):
self.depart_literal_block(node)
def visit_meta(self, node):
raise NotImplementedError, node.astext()
def depart_meta(self, node):
pass
def visit_note(self, node):
self.visit_admonition(node, 'note')
depart_note = depart_admonition
def indent(self, by=0.5):
# if we are in a ".SH" section there is already a .RS
step = self._indent[-1]
self._indent.append(by)
self.body.append(self.defs['indent'][0] % step)
def dedent(self):
self._indent.pop()
self.body.append(self.defs['indent'][1])
def visit_option_list(self, node):
self.indent(OPTION_LIST_INDENT)
def depart_option_list(self, node):
self.dedent()
def visit_option_list_item(self, node):
# one item of the list
self.body.append(self.defs['option_list_item'][0])
def depart_option_list_item(self, node):
self.body.append(self.defs['option_list_item'][1])
def visit_option_group(self, node):
# as one option could have several forms it is a group
# options without parameter bold only, .B, -v
# options with parameter bold italic, .BI, -f file
#
# we do not know if .B or .BI
self.context.append('.B') # blind guess
self.context.append(len(self.body)) # to be able to insert later
self.context.append(0) # option counter
def depart_option_group(self, node):
self.context.pop() # the counter
start_position = self.context.pop()
text = self.body[start_position:]
del self.body[start_position:]
self.body.append('%s%s\n' % (self.context.pop(), ''.join(text)))
def visit_option(self, node):
# each form of the option will be presented separately
if self.context[-1] > 0:
self.body.append('\\fP,\\fB ')
if self.context[-3] == '.BI':
self.body.append('\\')
self.body.append(' ')
def depart_option(self, node):
self.context[-1] += 1
def visit_option_string(self, node):
# do not know if .B or .BI
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
self.context[-3] = '.BI' # bold/italic alternate
if node['delimiter'] != ' ':
self.body.append('\\fB%s ' % node['delimiter'])
elif self.body[len(self.body)-1].endswith('='):
# a blank only means no blank in output, just changing font
self.body.append(' ')
else:
# blank backslash blank, switch font then a blank
self.body.append(' \\ ')
def depart_option_argument(self, node):
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
pass
def first_child(self, node):
first = isinstance(node.parent[0], nodes.label) # skip label
for child in node.parent.children[first:]:
if isinstance(child, nodes.Invisible):
continue
if child is node:
return 1
break
return 0
def visit_paragraph(self, node):
# ``.PP`` : Start standard indented paragraph.
# ``.LP`` : Start block paragraph, all except the first.
# ``.P [type]`` : Start paragraph type.
# NOTE don't use paragraph starts because they reset indentation.
# ``.sp`` is only vertical space
self.ensure_eol()
if not self.first_child(node):
self.body.append('.sp\n')
def depart_paragraph(self, node):
self.body.append('\n')
def visit_problematic(self, node):
self.body.append(self.defs['problematic'][0])
def depart_problematic(self, node):
self.body.append(self.defs['problematic'][1])
def visit_raw(self, node):
if node.get('format') == 'manpage':
self.body.append(node.astext() + "\n")
# Keep non-manpage raw text out of output:
raise nodes.SkipNode
def visit_reference(self, node):
"""E.g. link or email address."""
self.body.append(self.defs['reference'][0])
def depart_reference(self, node):
self.body.append(self.defs['reference'][1])
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
depart_revision = depart_docinfo_item
def visit_row(self, node):
self._active_table.new_row()
def depart_row(self, node):
pass
def visit_section(self, node):
self.section_level += 1
def depart_section(self, node):
self.section_level -= 1
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
depart_status = depart_docinfo_item
def visit_strong(self, node):
self.body.append(self.defs['strong'][0])
def depart_strong(self, node):
self.body.append(self.defs['strong'][1])
def visit_substitution_definition(self, node):
"""Internal only."""
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.document.reporter.warning('"substitution_reference" not supported',
base_node=node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append(self.defs['strong'][0])
elif isinstance(node.parent, nodes.document):
self.visit_docinfo_item(node, 'subtitle')
elif isinstance(node.parent, nodes.section):
self.body.append(self.defs['strong'][0])
def depart_subtitle(self, node):
# document subtitle calls SkipNode
self.body.append(self.defs['strong'][1]+'\n.PP\n')
def visit_system_message(self, node):
# TODO add report_level
#if node['level'] < self.document.reporter['writer'].report_level:
# Level is too low to display:
# raise nodes.SkipNode
attr = {}
backref_text = ''
if node.hasattr('id'):
attr['name'] = node['id']
if node.hasattr('line'):
line = ', line %s' % node['line']
else:
line = ''
self.body.append('.IP "System Message: %s/%s (%s:%s)"\n'
% (node['type'], node['level'], node['source'], line))
def depart_system_message(self, node):
pass
def visit_table(self, node):
self._active_table = Table()
def depart_table(self, node):
self.ensure_eol()
self.body.extend(self._active_table.as_list())
self._active_table = None
def visit_target(self, node):
# targets are in-document hyper targets, without any use for man-pages.
raise nodes.SkipNode
def visit_tbody(self, node):
pass
def depart_tbody(self, node):
pass
def visit_term(self, node):
self.body.append(self.defs['term'][0])
def depart_term(self, node):
self.body.append(self.defs['term'][1])
def visit_tgroup(self, node):
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
# MAYBE double line '='
pass
def depart_thead(self, node):
# MAYBE double line '='
pass
def visit_tip(self, node):
self.visit_admonition(node, 'tip')
depart_tip = depart_admonition
def visit_title(self, node):
if isinstance(node.parent, nodes.topic):
self.body.append(self.defs['topic-title'][0])
elif isinstance(node.parent, nodes.sidebar):
self.body.append(self.defs['sidebar-title'][0])
elif isinstance(node.parent, nodes.admonition):
self.body.append('.IP "')
elif self.section_level == 0:
self._docinfo['title'] = node.astext()
# document title for .TH
self._docinfo['title_upper'] = node.astext().upper()
raise nodes.SkipNode
elif self.section_level == 1:
self.body.append('.SH %s\n' % self.deunicode(node.astext().upper()))
raise nodes.SkipNode
else:
self.body.append('.SS ')
def depart_title(self, node):
if isinstance(node.parent, nodes.admonition):
self.body.append('"')
self.body.append('\n')
def visit_title_reference(self, node):
"""inline citation reference"""
self.body.append(self.defs['title_reference'][0])
def depart_title_reference(self, node):
self.body.append(self.defs['title_reference'][1])
def visit_topic(self, node):
pass
def depart_topic(self, node):
pass
def visit_sidebar(self, node):
pass
def depart_sidebar(self, node):
pass
def visit_rubric(self, node):
pass
def depart_rubric(self, node):
pass
def visit_transition(self, node):
# .PP Begin a new paragraph and reset prevailing indent.
# .sp N leaves N lines of blank space.
# .ce centers the next line
self.body.append('\n.sp\n.ce\n----\n')
def depart_transition(self, node):
self.body.append('\n.ce 0\n.sp\n')
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def visit_warning(self, node):
self.visit_admonition(node, 'warning')
depart_warning = depart_admonition
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s'
% node.__class__.__name__)
# vim: set fileencoding=utf-8 et ts=4 ai :
|
web30s/odoo-9.0c-20160402
|
refs/heads/master
|
hello/templates/openerp/addons/website_event/__init__.py
|
1023
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import controllers
import models
|
lenovor/scikit-learn
|
refs/heads/master
|
sklearn/feature_extraction/dict_vectorizer.py
|
234
|
# Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator : string, optional
Separator string used when constructing new features for one-hot
coding.
sparse : boolean, optional
Whether transform should produce scipy.sparse matrices.
True by default.
sort : boolean, optional
Whether ``feature_names_`` and ``vocabulary_`` should be sorted
when fitting. True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
|
xwolf12/django
|
refs/heads/master
|
tests/mutually_referential/models.py
|
414
|
"""
Mutually referential many-to-one relationships
Strings can be used instead of model literals to set up "lazy" relations.
"""
from django.db import models
class Parent(models.Model):
name = models.CharField(max_length=100)
# Use a simple string for forward declarations.
bestchild = models.ForeignKey("Child", models.SET_NULL, null=True, related_name="favored_by")
class Child(models.Model):
name = models.CharField(max_length=100)
# You can also explicitly specify the related app.
parent = models.ForeignKey("mutually_referential.Parent", models.CASCADE)
|
burito/PyUI
|
refs/heads/master
|
tests/spread.py
|
1
|
import testopt
import time
import pyui
def onChanged(x, y, value):
print "Cell (%d,%d) Set to <%s>" % ( x, y, value)
def onInserted(x, y, value):
print "Cell inserted (%d,%d) Set to <%s>" % ( x, y, value)
def run():
opts = testopt.parseCommandLine(800, 600)
done = 1
frame = 0
t = time.time()
pyui.init(*opts)
pyui.desktop.getRenderer().setMouseCursor("cursor.png", 11,7)
for i in range(0,1):
w = pyui.widgets.Frame(50+i*20, 50+i*20, 400, 400, "spreadsheet")
w.setLayout(pyui.layouts.GridLayoutManager(1,1,0))
b = pyui.sheet.Sheet(onChanged, onInserted)
b.setColumnTitle(3,"A very Long one")
b.setColumnTitle(2,"Table name")
b.setColumnTitle(1,"Something goes here....")
w.addChild(b)
w.pack()
pyui.run()
print "done"
pyui.quit()
if __name__ == '__main__':
run()
|
adrienbrault/home-assistant
|
refs/heads/dev
|
tests/components/cloudflare/conftest.py
|
8
|
"""Define fixtures available for all tests."""
from unittest.mock import patch
from pytest import fixture
from . import _get_mock_cfupdate
@fixture
def cfupdate(hass):
"""Mock the CloudflareUpdater for easier testing."""
mock_cfupdate = _get_mock_cfupdate()
with patch(
"homeassistant.components.cloudflare.CloudflareUpdater",
return_value=mock_cfupdate,
) as mock_api:
yield mock_api
@fixture
def cfupdate_flow(hass):
"""Mock the CloudflareUpdater for easier config flow testing."""
mock_cfupdate = _get_mock_cfupdate()
with patch(
"homeassistant.components.cloudflare.config_flow.CloudflareUpdater",
return_value=mock_cfupdate,
) as mock_api:
yield mock_api
|
destroy/SleekXMPP-gevent
|
refs/heads/master
|
tests/test_tostring.py
|
1
|
from sleekxmpp.test import *
from sleekxmpp.stanza import Message
from sleekxmpp.xmlstream.stanzabase import ET, ElementBase
from sleekxmpp.xmlstream.tostring import tostring, xml_escape
class TestToString(SleekTest):
"""
Test the implementation of sleekxmpp.xmlstream.tostring
"""
def tearDown(self):
self.stream_close()
def tryTostring(self, original='', expected=None, message='', **kwargs):
"""
Compare the result of calling tostring against an
expected result.
"""
if not expected:
expected=original
if isinstance(original, str):
xml = ET.fromstring(original)
else:
xml=original
result = tostring(xml, **kwargs)
self.failUnless(result == expected, "%s: %s" % (message, result))
def testXMLEscape(self):
"""Test escaping XML special characters."""
original = """<foo bar="baz">'Hi & welcome!'</foo>"""
escaped = xml_escape(original)
desired = """<foo bar="baz">'Hi"""
desired += """ & welcome!'</foo>"""
self.failUnless(escaped == desired,
"XML escaping did not work: %s." % escaped)
def testEmptyElement(self):
"""Test converting an empty element to a string."""
self.tryTostring(
original='<bar xmlns="foo" />',
message="Empty element not serialized correctly")
def testEmptyElementWrapped(self):
"""Test converting an empty element inside another element."""
self.tryTostring(
original='<bar xmlns="foo"><baz /></bar>',
message="Wrapped empty element not serialized correctly")
def testEmptyElementWrappedText(self):
"""
Test converting an empty element wrapped with text
inside another element.
"""
self.tryTostring(
original='<bar xmlns="foo">Some text. <baz /> More text.</bar>',
message="Text wrapped empty element serialized incorrectly")
def testMultipleChildren(self):
"""Test converting multiple child elements to a Unicode string."""
self.tryTostring(
original='<bar xmlns="foo"><baz><qux /></baz><quux /></bar>',
message="Multiple child elements not serialized correctly")
def testXMLNS(self):
"""
Test using xmlns tostring parameter, which will prevent adding
an xmlns attribute to the serialized element if the element's
namespace is the same.
"""
self.tryTostring(
original='<bar xmlns="foo" />',
expected='<bar />',
message="The xmlns parameter was not used properly.",
xmlns='foo')
def testTailContent(self):
"""
Test that elements of the form <a>foo <b>bar</b> baz</a> only
include " baz" once.
"""
self.tryTostring(
original='<a>foo <b>bar</b> baz</a>',
message='Element tail content is incorrect.')
def testStanzaNs(self):
"""
Test using the stanza_ns tostring parameter, which will prevent
adding an xmlns attribute to the serialized element if the
element's namespace is the same.
"""
self.tryTostring(
original='<bar xmlns="foo" />',
expected='<bar />',
message="The stanza_ns parameter was not used properly.",
stanza_ns='foo')
def testStanzaStr(self):
"""
Test that stanza objects are serialized properly.
"""
utf8_message = '\xe0\xb2\xa0_\xe0\xb2\xa0'
if not hasattr(utf8_message, 'decode'):
# Python 3
utf8_message = bytes(utf8_message, encoding='utf-8')
msg = Message()
msg['body'] = utf8_message.decode('utf-8')
expected = '<message><body>\xe0\xb2\xa0_\xe0\xb2\xa0</body></message>'
result = msg.__str__()
self.failUnless(result == expected,
"Stanza Unicode handling is incorrect: %s" % result)
def testXMLLang(self):
"""Test that serializing xml:lang works."""
self.stream_start()
msg = self.Message()
msg._set_attr('{%s}lang' % msg.xml_ns, "no")
expected = '<message xml:lang="no" />'
result = msg.__str__()
self.failUnless(expected == result,
"Serialization with xml:lang failed: %s" % result)
suite = unittest.TestLoader().loadTestsFromTestCase(TestToString)
|
julianrees/scripts
|
refs/heads/master
|
python/geomenergymap.py
|
1
|
#!/usr/bin/env python
"""
--------------------------------------------------------------------------------
(c) 2017 Julian Rees
License: GNU GPLv3
Description: Plot the energy trajectory of a geometry optimization in ORCA.
Run: python geomenergymap.py filename
Arguments: filename - file name with extension
Dependencies: matplotlib
--------------------------------------------------------------------------------
"""
print(__doc__)
import sys
import math
import matplotlib.pyplot as plt
# check for correct number of inputs
if len(sys.argv) != 2:
sys.exit("You must supply exactly one filename!")
# define the search strings and initialize the result lists
searchfor = "SUCCESS "
searchfor1 = "SCF NOT CONVERGED"
searchfor2 = "FINAL SINGLE POINT ENERGY"
cycles = []
energies = []
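# The parsing below assumes ORCA output shaped roughly like this
# (illustrative excerpts; token index 4 carries the number in both cases):
# *   SCF CONVERGED AFTER  18 CYCLES   *   <- line following a "SUCCESS" banner
# FINAL SINGLE POINT ENERGY     -1528.238476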
# open filename
fname = str(sys.argv[1])
with open(fname) as f:
# search lines for the markers; after a SUCCESS banner, the next line carries the SCF cycle count
for line in f:
if searchfor in line:
line = f.next()
cycle = int(line.split()[4])
cycles.append(cycle)
elif searchfor1 in line:
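# SCF did not converge; record a sentinel cycle count (125, presumably the
# run's SCF iteration limit).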
cycle = 125
cycles.append(cycle)
elif searchfor2 in line:
energy = float(line.split()[4])
energies.append(energy)
# plot cycles and energies
plt.plot(cycles, 'o-')
plt.title('%d Geometry Iterations' %len(cycles))
plt.xlabel('Geometry Iteration')
plt.ylabel('SCF Cycles')
plt.show()
plt.plot(energies, 'o-')
plt.title('%d Geometry Iterations' %len(cycles))
plt.xlabel('Geometry Iteration')
plt.ylabel('Final SP Energies')
plt.show()
|
unnikrishnankgs/va
|
refs/heads/master
|
venv/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py
|
33
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training as train
# The default learning rate of 0.2 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.2
def _get_optimizer(spec):
if isinstance(spec, six.string_types):
return layers.OPTIMIZER_CLS_NAMES[spec](
learning_rate=_LEARNING_RATE)
elif callable(spec):
return spec()
return spec
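# For example (illustrative; optimizer names come from
# layers.OPTIMIZER_CLS_NAMES), _get_optimizer('SGD') instantiates gradient
# descent with the default learning rate, while passing a callable such as
# lambda: train.AdamOptimizer() returns whatever that factory builds.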
# TODO(ispir): Remove this function by fixing '_infer_model' with single outputs
# and the as_iterable case.
def _as_iterable(preds, output):
for pred in preds:
yield pred[output]
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
columns_to_variables):
"""Adds a fake bias feature column filled with all 1s."""
# TODO(b/31008490): Move definition to a common constants place.
bias_column_name = "tf_virtual_bias_column"
if any(col.name == bias_column_name for col in feature_columns):
raise ValueError("%s is a reserved column name." % bias_column_name)
if not feature_columns:
raise ValueError("feature_columns can't be empty.")
# Loop through input tensors until we can figure out batch_size.
batch_size = None
for column in columns_to_tensors.values():
if isinstance(column, tuple):
column = column[0]
if isinstance(column, sparse_tensor.SparseTensor):
shape = tensor_util.constant_value(column.dense_shape)
if shape is not None:
batch_size = shape[0]
break
else:
batch_size = array_ops.shape(column)[0]
break
if batch_size is None:
raise ValueError("Could not infer batch size from input features.")
bias_column = layers.real_valued_column(bias_column_name)
columns_to_tensors[bias_column] = array_ops.ones([batch_size, 1],
dtype=dtypes.float32)
columns_to_variables[bias_column] = [bias_variable]
def _linear_model_fn(features, labels, mode, params, config=None):
"""A model_fn for linear models that use a gradient-based optimizer.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training. If `None`, an FTRL optimizer is used.
* gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio.
* joint_weights: If True, the weights for all columns will be stored in a
single (possibly partitioned) variable. This is more efficient, but is
incompatible with SDCAOptimizer and requires that all feature columns
be sparse and use the 'sum' combiner.
config: `RunConfig` object to configure the runtime settings.
Returns:
A `ModelFnOps` instance.
Raises:
ValueError: If mode is not any of the `ModeKeys`.
"""
head = params["head"]
feature_columns = params["feature_columns"]
optimizer = params.get("optimizer") or _get_default_optimizer(feature_columns)
gradient_clip_norm = params.get("gradient_clip_norm", None)
num_ps_replicas = config.num_ps_replicas if config else 0
joint_weights = params.get("joint_weights", False)
if not isinstance(features, dict):
features = {"": features}
parent_scope = "linear"
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20)
with variable_scope.variable_scope(
parent_scope,
values=tuple(six.itervalues(features)),
partitioner=partitioner) as scope:
if all([isinstance(fc, feature_column._FeatureColumn) # pylint: disable=protected-access
for fc in feature_columns]):
if joint_weights:
layer_fn = layers.joint_weighted_sum_from_feature_columns
else:
layer_fn = layers.weighted_sum_from_feature_columns
logits, _, _ = layer_fn(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=head.logits_dimension,
weight_collections=[parent_scope],
scope=scope)
else:
logits = fc_core.linear_model(
features=features,
feature_columns=feature_columns,
units=head.logits_dimension,
weight_collections=[parent_scope])
def _train_op_fn(loss):
global_step = contrib_variables.get_global_step()
my_vars = ops.get_collection(parent_scope)
grads = gradients.gradients(loss, my_vars)
if gradient_clip_norm:
grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
return (_get_optimizer(optimizer).apply_gradients(
zip(grads, my_vars), global_step=global_step))
return head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
def sdca_model_fn(features, labels, mode, params):
"""A model_fn for linear models that use the SDCA optimizer.
Args:
features: A dict of `Tensor` keyed by column name.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance. Type must be one of `_BinarySvmHead`,
`_RegressionHead` or `_BinaryLogisticHead`.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: An `SDCAOptimizer` instance.
* weight_column_name: A string defining the weight feature column, or
None if there are no weights.
* update_weights_hook: A `SessionRunHook` object or None. Used to update
model weights.
Returns:
A `ModelFnOps` instance.
Raises:
ValueError: If `optimizer` is not an `SDCAOptimizer` instance.
ValueError: If the type of head is neither `_BinarySvmHead`, nor
`_RegressionHead` nor `_MultiClassHead`.
ValueError: If mode is not any of the `ModeKeys`.
"""
head = params["head"]
feature_columns = params["feature_columns"]
optimizer = params["optimizer"]
weight_column_name = params["weight_column_name"]
update_weights_hook = params.get("update_weights_hook", None)
if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
raise ValueError("Optimizer must be of type SDCAOptimizer")
if isinstance(head, head_lib._BinarySvmHead): # pylint: disable=protected-access
loss_type = "hinge_loss"
elif isinstance(head, head_lib._BinaryLogisticHead): # pylint: disable=protected-access
loss_type = "logistic_loss"
elif isinstance(head, head_lib._RegressionHead): # pylint: disable=protected-access
assert head.logits_dimension == 1, ("SDCA only applies for "
"logits_dimension=1.")
loss_type = "squared_loss"
else:
raise ValueError("Unsupported head type: {}".format(head))
parent_scope = "linear"
with variable_scope.variable_op_scope(
features.values(), parent_scope) as scope:
features = features.copy()
features.update(layers.transform_features(features, feature_columns))
logits, columns_to_variables, bias = (
layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=1,
scope=scope))
_add_bias_column(feature_columns, features, bias, columns_to_variables)
def _train_op_fn(unused_loss):
global_step = contrib_variables.get_global_step()
sdca_model, train_op = optimizer.get_train_step(columns_to_variables,
weight_column_name,
loss_type, features,
labels, global_step)
if update_weights_hook is not None:
update_weights_hook.set_parameters(sdca_model, train_op)
return train_op
model_fn_ops = head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_op_fn,
logits=logits)
if update_weights_hook is not None:
return model_fn_ops._replace(
training_chief_hooks=(model_fn_ops.training_chief_hooks +
[update_weights_hook]))
return model_fn_ops
# Ensures consistency with LinearComposableModel.
def _get_default_optimizer(feature_columns):
learning_rate = min(_LEARNING_RATE, 1.0 / math.sqrt(len(feature_columns)))
return train.FtrlOptimizer(learning_rate=learning_rate)
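# E.g. with 100 feature columns the default FTRL learning rate is
# min(0.2, 1/sqrt(100)) = 0.1; with 10 columns it is min(0.2, ~0.316) = 0.2.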
class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook):
"""SessionRunHook to update and shrink SDCA model weights."""
def __init__(self):
pass
def set_parameters(self, sdca_model, train_op):
self._sdca_model = sdca_model
self._train_op = train_op
def begin(self):
"""Construct the update_weights op.
The op is implicitly added to the default graph.
"""
self._update_op = self._sdca_model.update_weights(self._train_op)
def before_run(self, run_context):
"""Return the update_weights op so that it is executed during this run."""
return session_run_hook.SessionRunArgs(self._update_op)
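# A minimal sketch of the hook's lifecycle; the wiring mirrors
# `sdca_model_fn` above, whose `_train_op_fn` calls `set_parameters`:
#
#   hook = _SdcaUpdateWeightsHook()
#   hook.set_parameters(sdca_model, train_op)  # done inside _train_op_fn
#   hook.begin()  # builds sdca_model.update_weights(train_op)
#   # before_run then hands the update op to the session, so the chief
#   # applies the SDCA weight update/shrink alongside each training step.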
class LinearClassifier(estimator.Estimator):
"""Linear classifier model.
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
# Estimator using the default optimizer.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Or estimator using the FTRL optimizer with regularization.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Or estimator using the SDCAOptimizer.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id',
num_loss_partitions=...,
symmetric_l2_regularization=2.0
))
# Input builders
def input_fn_train(): # returns x, y (where y represents label's class index).
  ...
def input_fn_eval(): # returns x, y (where y represents label's class index).
  ...
def input_fn_predict(): # returns x, None.
  ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
# predict_classes returns class indices.
estimator.predict_classes(input_fn=input_fn_predict)
```
If the user specifies `label_keys` in constructor, labels must be strings from
the `label_keys` vocabulary. Example:
```python
label_keys = ['label0', 'label1', 'label2']
estimator = LinearClassifier(
n_classes=n_classes,
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
label_keys=label_keys)
def input_fn_train(): # returns x, y (where y is one of label_keys).
  pass
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns x, y (where y is one of label_keys).
  pass
estimator.evaluate(input_fn=input_fn_eval)
def input_fn_predict(): # returns x, None.
  pass
# predict_classes returns one of label_keys.
estimator.predict_classes(input_fn=input_fn_predict)
```
Input of `fit` and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_weight pylint: disable=invalid-name
feature_columns,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=False,
_joint_weight=False,
config=None,
feature_engineering_fn=None,
label_keys=None):
"""Construct a `LinearClassifier` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
Note that class labels are integers representing the class index (i.e.
values from 0 to n_classes-1). For arbitrary label values (e.g. string
labels), convert to class indices first.
weight_column_name: A string defining the feature column name representing
weights. It is used to down-weight or boost examples during training; it
will be multiplied by the loss of the example.
optimizer: The optimizer used to train the model. If specified, it should
be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,
the Ftrl optimizer will be used.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
_joint_weight: If True, the weights for all columns will be stored in a
single (possibly partitioned) variable. It's more efficient, but it's
incompatible with SDCAOptimizer, and requires that all feature columns be
sparse and use the 'sum' combiner.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
label_keys: Optional list of strings with size `[n_classes]` defining the
label vocabulary. Only supported for `n_classes` > 2.
Returns:
A `LinearClassifier` estimator.
Raises:
ValueError: if n_classes < 2.
"""
# TODO(zoy): Give an unsupported error if enable_centered_bias is
# requested for SDCA once its default changes to False.
self._feature_columns = tuple(feature_columns or [])
assert self._feature_columns
chief_hook = None
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
enable_centered_bias):
enable_centered_bias = False
logging.warning("centered_bias is not supported with SDCA, "
"please disable it explicitly.")
head = head_lib.multi_class_head(
n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
label_keys=label_keys)
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
}
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
assert not _joint_weight, ("_joint_weight is incompatible with the"
" SDCAOptimizer")
assert n_classes == 2, "SDCA only applies to binary classification."
model_fn = sdca_model_fn
# The model_fn passes the model parameters to the chief_hook. We then use
# the hook to update weights and shrink step only on the chief.
chief_hook = _SdcaUpdateWeightsHook()
params.update({
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
})
else:
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weight,
})
super(LinearClassifier, self).__init__(
model_fn=model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_classes, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted classes. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_classes` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns classes.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_classes(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_classes(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted classes for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
"""
key = prediction_key.PredictionKey.CLASSES
preds = super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted probabilities for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted probabilities with shape [batch_size, n_classes]
(or an iterable of predicted probabilities if as_iterable is True).
"""
key = prediction_key.PredictionKey.PROBABILITIES
preds = super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return super(LinearClassifier, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or
export.classification_signature_fn_with_prob),
prediction_key=prediction_key.PredictionKey.PROBABILITIES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
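# A hedged usage sketch for the prediction methods above (`clf` is a fitted
# LinearClassifier and `input_fn_predict` is as in the class docstring):
#
#   classes = list(clf.predict_classes(input_fn=input_fn_predict))
#   probas = list(clf.predict_proba(input_fn=input_fn_predict))
#   # classes[i] is a class index in [0, n_classes); probas[i] holds
#   # n_classes probabilities that sum to 1.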
class LinearRegressor(estimator.Estimator):
"""Linear regressor model.
Train a linear regression model to predict label value given observation of
feature values.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
estimator = LinearRegressor(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Input builders
def input_fn_train(): # returns x, y
  ...
def input_fn_eval(): # returns x, y
  ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`:
key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `WeightedSparseColumn`):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
"""
def __init__(self, # _joint_weights: pylint: disable=invalid-name
feature_columns,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=False,
label_dimension=1,
_joint_weights=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearRegressor` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
weight_column_name: A string defining the feature column name representing
weights. It is used to down-weight or boost examples during training; it
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Ftrl optimizer.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
_joint_weights: If True, use a single (possibly partitioned) variable to
store the weights. It's faster, but requires that all feature columns be
sparse and use the 'sum' combiner. Incompatible with SDCAOptimizer.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearRegressor` estimator.
"""
self._feature_columns = tuple(feature_columns or [])
assert self._feature_columns
chief_hook = None
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
enable_centered_bias):
enable_centered_bias = False
logging.warning("centered_bias is not supported with SDCA, "
"please disable it explicitly.")
head = head_lib.regression_head(
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias)
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
}
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
assert label_dimension == 1, "SDCA only applies for label_dimension=1."
assert not _joint_weights, ("_joint_weights is incompatible with"
" SDCAOptimizer.")
model_fn = sdca_model_fn
# The model_fn passes the model parameters to the chief_hook. We then use
# the hook to update weights and shrink step only on the chief.
chief_hook = _SdcaUpdateWeightsHook()
params.update({
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
})
else:
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weights,
})
super(LinearRegressor, self).__init__(
model_fn=model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_scores, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted scores. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_scores` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns scores.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_scores(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(LinearRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_scores(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted scores for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
"""
key = prediction_key.PredictionKey.SCORES
preds = super(LinearRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return super(LinearRegressor, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or export.regression_signature_fn),
prediction_key=prediction_key.PredictionKey.SCORES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
class LinearEstimator(estimator.Estimator):
"""Linear model with user specified head.
Train a generalized linear model to predict label value given observation of
feature values.
Example:
To do Poisson regression:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
estimator = LinearEstimator(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
head=head_lib.poisson_regression_head())
# Input builders
def input_fn_train(): # returns x, y
  ...
def input_fn_eval(): # returns x, y
  ...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have the following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`:
key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `WeightedSparseColumn`):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
"""
def __init__(self, # _joint_weights: pylint: disable=invalid-name
feature_columns,
head,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
_joint_weights=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearEstimator` object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
head: An instance of the `_Head` class.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
weight_column_name: A string defining the feature column name representing
weights. It is used to down-weight or boost examples during training; it
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Ftrl optimizer.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
_joint_weights: If True, use a single (possibly partitioned) variable to
store the weights. It's faster, but requires that all feature columns be
sparse and use the 'sum' combiner. Incompatible with SDCAOptimizer.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearEstimator` estimator.
Raises:
ValueError: if the optimizer is not supported (e.g., SDCAOptimizer).
"""
assert feature_columns
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
raise ValueError("LinearEstimator does not support SDCA optimizer.")
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weights,
}
super(LinearEstimator, self).__init__(
model_fn=_linear_model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
|
google-research/long-range-arena
|
refs/heads/main
|
lra_benchmarks/listops/configs/reformer_base.py
|
1
|
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
from lra_benchmarks.listops.configs import base_listops_config
def get_config():
"""Get the default hyperparameter configuration."""
config = base_listops_config.get_config()
config.model_type = "reformer"
return config
def get_hyper(hyper):
return hyper.product([])
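# A hedged usage sketch, assuming `base_listops_config.get_config()` returns
# the usual ml_collections-style ConfigDict used across these benchmarks:
#
#   config = get_config()
#   assert config.model_type == "reformer"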
|
giserh/pysparkling
|
refs/heads/master
|
pysparkling/broadcast.py
|
1
|
class Broadcast(object):
def __init__(self, value):
self.value = value
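# A minimal usage sketch, mirroring the read-only `.value` access of
# pyspark's Broadcast:
#
#   numbers = Broadcast([1, 2, 3])
#   assert numbers.value == [1, 2, 3]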
|
sagemathinc/smc
|
refs/heads/master
|
src/scripts/kvm_addresses.py
|
4
|
#!/usr/bin/env python
###############################################################################
#
# CoCalc: Collaborative Calculation in the Cloud
#
# Copyright (C) 2016, Sagemath Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os, sys
def ip_addresses(name):
mac_to_addr = {}
for x in os.popen('arp -an').readlines():
v = x.split()
mac_to_addr[v[3]] = v[1][1:-1]
v = os.popen('virsh dumpxml "%s"' % name).readlines()
ans = []
for x in v:
if 'mac address' in x:
mac = x.split("'")[1]
if mac in mac_to_addr:
ans.append(mac_to_addr[mac])
return ans
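# A hedged sketch of the parsing above: a typical `arp -an` line looks like
#   ? (192.168.122.15) at 52:54:00:ab:cd:ef [ether] on virbr0
# so v[1][1:-1] strips the parentheses from the IP and v[3] is the MAC,
# which is matched against the <mac address='...'/> entries in virsh's XML.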
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.stderr.write("""
Get IP addresses of a KVM virtual machine (not VPN related), one per line:
Usage: %s [name of machine]
""" % sys.argv[0])
sys.exit(1)
for x in ip_addresses(sys.argv[1]):
print x
|