# (dataset-extraction residue, preserved as a comment so the file stays parseable)
# code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M
# |---|---|---|---|---|---|
# Jython Database Specification API 2.0
#
# $Id: zxtest.py,v 1.1 2005/10/05 20:19:30 eytanadar Exp $
#
# Copyright (c) 2001 brian zimmer <bzimmer@ziclix.com>
from com.ziclix.python.sql import zxJDBC
from java.util import Calendar, Date as JDate
import tempfile, os, time, runner
class zxCoreTestCase(runner.SQLTestCase):
    """Base test case: opens one zxJDBC connection per test and closes it in tearDown.

    Jython/Python-2 code: `map` returns a list and `apply` is the py2 spelling
    of connect(*args, **keywords).
    """

    def setUp(self):
        runner.SQLTestCase.setUp(self)
        # fresh connection per test, with explicit transaction control
        self.db = self.connect()
        self.db.autocommit = 0

    def tearDown(self):
        self.db.close()
        runner.SQLTestCase.tearDown(self)

    def connect(self):
        """Build a connection from the factory described by the runner config."""
        factory = runner.__imp__(self.factory.classname)
        # each configured argument is a (name, value) pair; keep only the values
        args = map(lambda x: x[1], self.factory.arguments)
        connect = getattr(factory, self.factory.method)
        return apply(connect, args, self.factory.keywords)

    def cursor(self, dynamic=0):
        """Return a cursor, wrapping its datahandler when the test defines one."""
        c = self.db.cursor(dynamic)
        if hasattr(self, "datahandler"):
            c.datahandler = self.datahandler(c.datahandler)
        return c
class zxJDBCTestCase(zxCoreTestCase):
    """Fixture base: creates and populates the `zxtesting` table (7 rows) per test."""

    def setUp(self):
        zxCoreTestCase.setUp(self)
        # NOTE(review): zxCoreTestCase.setUp already assigned self.db; this
        # opens a second connection and the first is never closed -- confirm
        # whether the reconnect is intentional
        self.db = self.connect()
        self.db.autocommit = 0
        c = self.cursor()
        # drop any table left behind by a previous failed run
        try:
            c.execute("drop table zxtesting")
        except:
            self.db.rollback()
        try:
            c.execute("create table zxtesting (id int not null, name varchar(32), state varchar(32), primary key (id))")
            c.execute("insert into zxtesting (id, name, state) values (1, 'test0', 'il')")
            c.execute("insert into zxtesting (id, name, state) values (2, 'test1', 'wi')")
            c.execute("insert into zxtesting (id, name, state) values (3, 'test2', 'tx')")
            c.execute("insert into zxtesting (id, name, state) values (4, 'test3', 'co')")
            c.execute("insert into zxtesting (id, name, state) values (5, 'test4', 'il')")
            c.execute("insert into zxtesting (id, name, state) values (6, 'test5', 'ca')")
            c.execute("insert into zxtesting (id, name, state) values (7, 'test6', 'wi')")
            self.db.commit()
        finally:
            c.close()

    def tearDown(self):
        c = self.cursor()
        try:
            try:
                c.execute("drop table zxtesting")
            except:
                self.db.rollback()
        finally:
            c.close()
        zxCoreTestCase.tearDown(self)
class zxAPITestCase(zxJDBCTestCase):
    """DB-API 2.0 conformance tests run against the `zxtesting` fixture table."""

    def testConnection(self):
        """testing connection"""
        assert self.db, "invalid connection"

    def testAutocommit(self):
        """testing autocommit functionality"""
        # only meaningful when the underlying JDBC driver supports transactions
        if self.db.__connection__.getMetaData().supportsTransactions():
            self.db.autocommit = 1
            self.assertEquals(1, self.db.__connection__.getAutoCommit())
            self.db.autocommit = 0
            self.assertEquals(0, self.db.__connection__.getAutoCommit())

    def testSimpleQuery(self):
        """testing simple queries with cursor.execute(), no parameters"""
        c = self.cursor()
        try:
            c.execute("select count(*) from zxtesting")
            f = c.fetchall()
            assert len(f) == 1, "expecting one row"
            c.execute("select * from zxtesting")
            data = c.fetchone()
            # NOTE(review): this repeats the previous assertion on `f`; it
            # probably meant to check `data` (e.g. that a row was fetched)
            assert len(f) == 1, "expecting one row"
            assert data[0] == 1, "expected [1] rows, got [%d]" % (data[0])
        finally:
            c.close()

    def testColumns(self):
        """testing cursor.columns()"""
        c = self.cursor()
        try:
            c.columns(None, None, "zxtesting", None)
            f = c.fetchall()
            assert c.rowcount == 3, "columns() failed to report correct number of columns, expected [3], got [%d]" % (c.rowcount)
            # sort by column name (index 3 of each metadata row)
            f.sort(lambda x, y: cmp(x[3], y[3]))
            assert "name" == f[1][3].lower(), "expected [name], got [%s]" % (f[1][3].lower())
        finally:
            c.close()

    def testTypeInfo(self):
        """testing cursor.gettypeinfo()"""
        c = self.cursor()
        try:
            c.gettypeinfo()
            f = c.fetchall()
            assert f is not None, "expected some type information, got None"
            # this worked prior to the Fetch re-write, now the client will have to bear the burden, sorry
            #c.gettypeinfo(zxJDBC.INTEGER)
            #f = c.fetchall()
            #assert f[0][1] == zxJDBC.INTEGER, "expected [%d], got [%d]" % (zxJDBC.INTEGER, f[0][1])
        finally:
            c.close()

    def testTableTypeInfo(self):
        """testing cursor.gettabletypeinfo()"""
        c = self.cursor()
        try:
            c.gettabletypeinfo()
            c.fetchall()
            assert c.rowcount > 0, "expected some table types"
        finally:
            c.close()

    def testTupleParams(self):
        """testing the different ways to pass params to execute()"""
        c = self.cursor()
        try:
            # a bare scalar is rejected; params must be a sequence
            self.assertRaises(zxJDBC.ProgrammingError, c.execute, "select * from zxtesting where id = ?", params=4)
            c.execute("select * from zxtesting where id = ?", params=[4])
            c.execute("select * from zxtesting where id = ?", params=(4,))
        finally:
            c.close()

    def testConnectionAttribute(self):
        """testing the getting and setting of cursor.connection"""
        c = self.cursor()
        try:
            from com.ziclix.python.sql import PyConnection
            assert isinstance(c.connection, PyConnection), "expected PyConnection"
            # NOTE(review): this passes a 3-tuple as setattr's single target
            # argument; the intended call is likely
            #   self.assertRaises(TypeError, setattr, c, "connection", None)
            # It still raises TypeError, but for the wrong reason.
            self.assertRaises(TypeError, setattr, (c, "connection", None), None)
        finally:
            c.close()

    def testFetchMany(self):
        """testing cursor.fetchmany()"""
        c = self.cursor()
        try:
            c.execute("select * from zxtesting")
            data = c.fetchmany(6)
            assert len(data) == 6, "expected [6] rows, got [%d]" % (len(data))
            c.execute("select * from zxtesting")
            # asking for more rows than exist returns only what is available
            data = c.fetchmany(16)
            assert len(data) == 7, "expected [7] rows, got [%d]" % (len(data))
        finally:
            c.close()

    def testQueryWithParameter(self):
        """testing query by parameter"""
        c = self.cursor()
        try:
            c.execute("select name from zxtesting where state = ?", [("il",)], {0:zxJDBC.VARCHAR})
            data = c.fetchall()
            assert len(data) == 2, "expected [2] rows, got [%d]" % (len(data))
            c.execute("select name from zxtesting where state = ?", [("co",)], {0:zxJDBC.VARCHAR})
            data = c.fetchall()
            assert len(data) == 1, "expected [1] row, got [%d]" % (len(data))
        finally:
            c.close()

    def testInsertWithFile(self):
        """testing insert with file"""
        assert self.has_table("texttable"), "missing attribute texttable"
        fp = open(tempfile.mktemp(), "w")
        c = self.cursor()
        try:
            try:
                c.execute(self.table("texttable")[1])
                # build a 3500-character payload by repeating the temp file name
                data = fp.name * 300
                data = data[:3500]
                fp.write(data)
                fp.flush()
                fp.close()
                fp = open(fp.name, "r")
                c.execute("insert into %s (a, b) values (?, ?)" % (self.table("texttable")[0]), [(0, fp)], {1:zxJDBC.LONGVARCHAR})
                self.db.commit()
                c.execute("select b from %s" % (self.table("texttable")[0]))
                f = c.fetchall()
                assert len(f) == 1, "expected [1] row, got [%d]" % (len(f))
                assert len(f[0][0]) == len(data), "expected [%d], got [%d]" % (len(data), len(f[0][0]))
                assert data == f[0][0], "failed to retrieve the same text as inserted"
            except Exception, e:
                print e
                raise e
        finally:
            c.execute("drop table %s" % (self.table("texttable")[0]))
            c.close()
            self.db.commit()
            fp.close()
            os.remove(fp.name)

    def __calendar(self):
        # java.util.Calendar seeded with the current time
        c = Calendar.getInstance()
        c.setTime(JDate())
        return c

    def testDate(self):
        """testing creation of Date"""
        # Java uses milliseconds and Python uses seconds, so adjust the time accordingly
        # seeded with Java
        c = self.__calendar()
        o = zxJDBC.DateFromTicks(c.getTime().getTime() / 1000L)
        # java.util.Calendar months are 0-based, hence the + 1
        v = zxJDBC.Date(c.get(Calendar.YEAR), c.get(Calendar.MONTH) + 1, c.get(Calendar.DATE))
        assert o.equals(v), "incorrect date conversion using java, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())
        # seeded with Python
        t = time.time()
        l = time.localtime(t)
        o = zxJDBC.DateFromTicks(t)
        v = zxJDBC.Date(l[0], l[1], l[2])
        assert o.equals(v), "incorrect date conversion, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())

    def testTime(self):
        """testing creation of Time"""
        # Java uses milliseconds and Python uses seconds, so adjust the time accordingly
        # seeded with Java
        c = self.__calendar()
        o = zxJDBC.TimeFromTicks(c.getTime().getTime() / 1000L)
        v = zxJDBC.Time(c.get(Calendar.HOUR), c.get(Calendar.MINUTE), c.get(Calendar.SECOND))
        assert o.equals(v), "incorrect date conversion using java, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())
        # seeded with Python
        #t = time.time()
        #l = time.localtime(t)
        #o = zxJDBC.TimeFromTicks(t)
        #v = zxJDBC.Time(l[3], l[4], l[5])
        #assert o.equals(v), "incorrect date conversion using python, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())

    def testTimestamp(self):
        """testing creation of Timestamp"""
        # Java uses milliseconds and Python uses seconds, so adjust the time accordingly
        # seeded with Java
        c = self.__calendar()
        o = zxJDBC.TimestampFromTicks(c.getTime().getTime() / 1000L)
        v = zxJDBC.Timestamp(c.get(Calendar.YEAR), c.get(Calendar.MONTH) + 1, c.get(Calendar.DATE),
                             c.get(Calendar.HOUR), c.get(Calendar.MINUTE), c.get(Calendar.SECOND))
        assert o.equals(v), "incorrect date conversion using java, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())
        # seeded with Python
        #t = time.time()
        #l = time.localtime(t)
        #o = zxJDBC.TimestampFromTicks(t)
        #v = zxJDBC.Timestamp(l[0], l[1], l[2], l[3], l[4], l[5])
        #assert o.equals(v), "incorrect date conversion using python, got [%ld], expected [%ld]" % (v.getTime(), o.getTime())

    def _test_precision(self, (tabname, sql), diff, values, attr):
        """Round-trip `values` through a two-column table and compare with `diff`.

        tabname/sql  -- (table name, create-table DDL) pair (py2 tuple parameter)
        diff         -- two-arg function returning a numeric delta
        values       -- values to insert into column b
        attr         -- optional no-arg method name used to convert a value
                        before formatting the failure message
        """
        # best-effort pre-clean: drop a leftover table from a previous run
        try:
            c = self.cursor()
            try:
                c.execute("drop table %s" % (tabname))
                self.db.commit()
            except:
                self.db.rollback()
        finally:
            c.close()
        try:
            c = self.cursor()
            c.execute(sql)
            c.execute("insert into %s (a, b) values (?, ?)" % (tabname), map(lambda x: (0, x), values))
            c.execute("select a, b from %s" % (tabname))
            f = c.fetchall()
            assert len(values) == len(f), "mismatched result set length"
            for i in range(0, len(f)):
                v = values[i]
                if attr: v = getattr(v, attr)()
                msg = "expected [%0.10f], got [%0.10f] for index [%d] of [%d]" % (v, f[i][1], (i+1), len(f))
                # tolerate small numeric error from the db's storage precision
                assert diff(f[i][1], values[i]) < 0.01, msg
            self.db.commit()
        finally:
            c.close()
        # post-clean: drop the table we created
        try:
            c = self.cursor()
            try:
                c.execute("drop table %s" % (tabname))
                self.db.commit()
            except:
                self.db.rollback()
        finally:
            c.close()

    def testFloat(self):
        """testing value of float"""
        assert self.has_table("floattable"), "missing attribute floattable"
        values = [4.22, 123.44, 292.09, 33.2, 102.00, 445]
        self._test_precision(self.table("floattable"), lambda x, y: x-y, values, None)

    def testBigDecimal(self):
        """testing value of BigDecimal"""
        assert self.has_table("floattable"), "missing attribute floattable"
        from java.math import BigDecimal
        values = [BigDecimal(x).setScale(2, BigDecimal.ROUND_UP) for x in [4.22, 123.44, 292.09, 33.2, 102.00, 445]]
        self._test_precision(self.table("floattable"), lambda x, y, b=BigDecimal: b(x).subtract(y).doubleValue(), values, "doubleValue")

    def testBigDecimalConvertedToDouble(self):
        """testing value of BigDecimal when converted to double"""
        assert self.has_table("floattable"), "missing attribute floattable"
        from java.math import BigDecimal
        values = [BigDecimal(x).setScale(2, BigDecimal.ROUND_UP) for x in [4.22, 123.44, 292.09, 33.2, 102.00, 445]]
        self._test_precision(self.table("floattable"), lambda x, y: x - y.doubleValue(), values, "doubleValue")

    def testNextset(self):
        """testing nextset"""
        c = self.cursor()
        try:
            # two parameter tuples -> two result sets for the same statement
            c.execute("select * from zxtesting where id = ?", [(3,), (4,)])
            f = c.fetchall()
            assert f, "expected results, got None"
            assert len(f) == 1, "expected [1], got [%d]" % (len(f))
            assert c.nextset(), "expected next set, got None"
            f = c.fetchall()
            assert f, "expected results after call to nextset(), got None"
            assert len(f) == 1, "expected [1], got [%d]" % (len(f))
        finally:
            c.close()

    def testJavaUtilList(self):
        """testing parameterized values in a java.util.List"""
        c = self.cursor()
        try:
            from java.util import LinkedList
            a = LinkedList()
            a.add((3,))
            c.execute("select * from zxtesting where id = ?", a)
            f = c.fetchall()
            assert len(f) == 1, "expected [1], got [%d]" % (len(f))
        finally:
            c.close()

    def testUpdateCount(self, insert_only=0):
        # NOTE(review): the insert_only parameter is never used in this body
        c = self.cursor()
        try:
            c.execute("insert into zxtesting values (?, ?, ?)", [(500, 'bz', 'or')])
            assert c.updatecount == 1, "expected [1], got [%d]" % (c.updatecount)
            c.execute("select * from zxtesting")
            assert c.updatecount == -1, "expected updatecount to be -1 after query"
            # there's a *feature* in the mysql engine where it returns 0 for delete if there is no
            # where clause, regardless of the actual value. using a where clause forces it to calculate
            # the appropriate value
            c.execute("delete from zxtesting where 1>0")
            assert c.updatecount == 8, "expected [8], got [%d]" % (c.updatecount)
        finally:
            c.close()

    def _test_time(self, (tabname, sql), factory, values, _type, _cmp=cmp):
        """Insert temporal values built by `factory`, then query and delete by value.

        _cmp is a two-arg comparison returning 0 on equality (defaults to cmp).
        """
        c = self.cursor()
        try:
            c.execute(sql)
            # py2: apply(f, x) == f(*x); builds one temporal object per value tuple
            dates = map(lambda x, f=factory: apply(f, x), values)
            for a in dates:
                c.execute("insert into %s values (1, ?)" % (tabname), [(a,)], {0:_type})
            self.db.commit()
            c.execute("select * from %s where b = ?" % (tabname), [(dates[0],)], {0:_type})
            f = c.fetchall()
            assert len(f) == 1, "expected length [1], got [%d]" % (len(f))
            assert _cmp(f[0][1], dates[0]) == 0, "expected date [%s], got [%s]" % (str(dates[0]), str(f[0][1]))
            c.execute("delete from %s where b = ?" % (tabname), [(dates[1],)], {0:_type})
            self.db.commit()
            c.execute("select * from %s" % (tabname))
            f = c.fetchall()
            assert len(f) == len(dates) - 1, "expected length [%d], got [%d]" % (len(dates) - 1, len(f))
        finally:
            c.execute("drop table %s" % (tabname))
            c.close()
            self.db.commit()

    def testUpdateSelectByDate(self):
        """testing insert, update, query and delete by java.sql.Date"""
        assert self.has_table("datetable"), "missing attribute datetable"
        def _cmp_(x, y):
            # returns falsy (0/False) when equal, matching the _cmp contract
            xt = (x.getYear(), x.getMonth(), x.getDay())
            yt = (y.getYear(), y.getMonth(), y.getDay())
            return not xt == yt
        values = [(1996, 6, 22), (2000, 11, 12), (2000, 1, 12), (1999, 9, 24)]
        self._test_time(self.table("datetable"), zxJDBC.Date, values, zxJDBC.DATE, _cmp_)

    def testUpdateSelectByTime(self):
        """testing insert, update, query and delete by java.sql.Time"""
        assert self.has_table("timetable"), "missing attribute timetable"
        def _cmp_(x, y):
            xt = (x.getHours(), x.getMinutes(), x.getSeconds())
            yt = (y.getHours(), y.getMinutes(), y.getSeconds())
            return not xt == yt
        values = [(10, 11, 12), (3, 1, 12), (22, 9, 24)]
        self._test_time(self.table("timetable"), zxJDBC.Time, values, zxJDBC.TIME, _cmp_)

    def testUpdateSelectByTimestamp(self):
        """testing insert, update, query and delete by java.sql.Timestamp"""
        assert self.has_table("timestamptable"), "missing attribute timestamptable"
        def _cmp_(x, y):
            xt = (x.getYear(), x.getMonth(), x.getDay(), x.getHours(), x.getMinutes(), x.getSeconds())
            yt = (y.getYear(), y.getMonth(), y.getDay(), y.getHours(), y.getMinutes(), y.getSeconds())
            return not xt == yt
        values = [(1996, 6, 22, 10, 11, 12), (2000, 11, 12, 3, 1, 12), (2001, 1, 12, 4, 9, 24)]
        self._test_time(self.table("timestamptable"), zxJDBC.Timestamp, values, zxJDBC.TIMESTAMP, _cmp_)

    def testOrderOfArgs(self):
        """testing execute with different argument orderings"""
        c = self.cursor()
        try:
            # maxrows only
            c.execute("select * from zxtesting", maxrows=3)
            f = c.fetchall()
            assert len(f) == 3, "expected length [3], got [%d]" % (len(f))
            # bindings and params flipped
            c.execute("select * from zxtesting where id = ?", bindings={0:zxJDBC.INTEGER}, params=[(3,)])
            f = c.fetchall()
            assert len(f) == 1, "expected length [1], got [%d]" % (len(f))
            # bindings and params flipped, empty params
            c.execute("select * from zxtesting where id = ?", bindings={}, params=[(3,)])
            f = c.fetchall()
            assert len(f) == 1, "expected length [1], got [%d]" % (len(f))
            # bindings and params flipped, empty params, empty bindings
            c.execute("select * from zxtesting where id = 3", bindings={}, params=[])
            f = c.fetchall()
            assert len(f) == 1, "expected length [1], got [%d]" % (len(f))
        finally:
            c.close()
            self.db.commit()

    def testMaxrows(self):
        """testing maxrows"""
        c = self.cursor()
        try:
            c.execute("select * from zxtesting", maxrows=3)
            f = c.fetchall()
            assert len(f) == 3, "expected length [3], got [%d]" % (len(f))
            c.execute("select count(*) from zxtesting")
            f = c.fetchall()
            num = f[0][0]
            # maxrows=0 means unlimited
            c.execute("select * from zxtesting", maxrows=0)
            f = c.fetchall()
            assert len(f) == num, "expected length [%d], got [%d]" % (num, len(f))
        finally:
            c.close()
            self.db.commit()

    def testPrimaryKey(self):
        """testing for primary key information"""
        c = self.cursor()
        try:
            c.primarykeys(None, None, "zxtesting")
            f = c.fetchall()
            assert len(f) == 1, "expected [1], got [%d]" % (len(f))
            assert f[0][3].lower() == "id", "expected [id], got [%s]" % (f[0][3])
        finally:
            c.close()
            self.db.commit()

    def testForeignKey(self):
        """testing for foreign key information"""
        # placeholder: the fixture table declares no foreign keys
        pass

    def testIndexInfo(self):
        """testing index information"""
        c = self.cursor()
        try:
            c.statistics(None, None, "zxtesting", 0, 0)
            f = c.fetchall()
            assert f is not None, "expected some values"
            # filter out any indicies with name None
            f = filter(lambda x: x[5], f)
            assert len(f) == 1, "expected [1], got [%d]" % (len(f))
        finally:
            c.close()

    def testFetchingBeforeExecute(self):
        """testing fetch methods before execution"""
        c = self.cursor()
        try:
            f = c.fetchall()
            assert f == None, "expecting no results since no execute*() has been called"
        finally:
            c.close()

    def testFetchingWithArraysize(self):
        """testing fetch methods using arraysize"""
        c = self.cursor()
        try:
            c.execute("select * from zxtesting")
            # fetchmany() with no argument honours cursor.arraysize
            f = c.fetchmany()
            assert len(f) == c.arraysize, "expecting [%d] rows, got [%d]" % (c.arraysize, len(f))
            c.execute("select * from zxtesting")
            c.arraysize = 4
            f = c.fetchmany()
            assert len(f) == 4, "expecting [4] rows, got [%d]" % (len(f))
            c.execute("select * from zxtesting")
            # a negative arraysize fetches everything (all 7 fixture rows)
            c.arraysize = -1
            f = c.fetchmany()
            assert len(f) == 7, "expecting [7] rows, got [%d]" % (len(f))
        finally:
            c.close()

    def testBindingsWithNoParams(self):
        """testing bindings with no params"""
        c = self.cursor()
        try:
            self.assertRaises(zxJDBC.ProgrammingError, c.execute, "select * from zxtesting", {0:zxJDBC.INTEGER})
            # test an inappropriate value for a binding
            self.assertRaises(zxJDBC.ProgrammingError, c.execute, "select * from zxtesting", {0:{}})
        finally:
            c.close()

    def testDynamicCursor(self):
        """testing dynamic cursor queries"""
        c = self.cursor(1)
        try:
            c.execute("select * from zxtesting")
            f = c.fetchmany(4)
            assert len(f) == 4, "expected [4] rows, got [%d]" % (len(f))
        finally:
            c.close()

    def testRowid(self):
        """test the autoincrement facilities of the different handlers"""
        assert self.has_table("autoincrementtable"), "no autoincrement table"
        c = self.cursor()
        assert c.lastrowid == None, "expected initial lastrowid to be None"
        try:
            tabname, sql = self.table("autoincrementtable")
            c.execute(sql)
            c.execute("insert into %s (b) values (?)" % (tabname), [(0,)])
            assert c.lastrowid is not None, "lastrowid is None"
            try:
                # each insert should advance lastrowid by exactly one
                for idx in range(c.lastrowid + 1, c.lastrowid + 25):
                    c.execute("insert into %s (b) values (?)" % (tabname), [(idx,)])
                    assert c.lastrowid is not None, "lastrowid is None"
                    self.assertEquals(idx, c.lastrowid)
            except:
                self.db.rollback()
        finally:
            # some databases (e.g. sequences) need extra cleanup DDL afterwards
            if self.has_table("post_autoincrementtable"):
                try:
                    sequence, sql = self.table("post_autoincrementtable")
                    c.execute(sql)
                    self.db.commit()
                except:
                    self.db.rollback()
            try:
                c.execute("drop table %s" % (tabname))
                self.db.commit()
            except:
                self.db.rollback()
            self.db.commit()
            c.close()
class LOBTest(zxJDBCTestCase):
    """Round-trip tests for BLOB and CLOB column handling."""

    def __blob(self, obj=0):
        """Insert a serialized Java object as a BLOB and read it back.

        obj=1 inserts from a Python file object, otherwise from a
        java.io.FileInputStream.
        """
        assert self.has_table("blobtable"), "no blob table"
        tabname, sql = self.table("blobtable")
        fn = tempfile.mktemp()
        fp = None
        c = self.cursor()
        try:
            hello = ("hello",) * 1024
            c.execute(sql)
            self.db.commit()
            from java.io import FileOutputStream, FileInputStream, ObjectOutputStream, ObjectInputStream, ByteArrayInputStream
            # serialize the tuple to disk ...
            fp = FileOutputStream(fn)
            oos = ObjectOutputStream(fp)
            oos.writeObject(hello)
            fp.close()
            # ... and sanity-check that it deserializes to an equal value
            fp = FileInputStream(fn)
            blob = ObjectInputStream(fp)
            value = blob.readObject()
            fp.close()
            assert hello == value, "unable to serialize properly"
            if obj == 1:
                fp = open(fn, "rb")
            else:
                fp = FileInputStream(fn)
            c.execute("insert into %s (a, b) values (?, ?)" % (tabname), [(0, fp)], {1:zxJDBC.BLOB})
            self.db.commit()
            c.execute("select * from %s" % (tabname))
            f = c.fetchall()
            bytes = f[0][1]
            # deserialize straight from the fetched byte array
            blob = ObjectInputStream(ByteArrayInputStream(bytes)).readObject()
            assert hello == blob, "blobs are not equal"
        finally:
            c.execute("drop table %s" % (tabname))
            c.close()
            self.db.commit()
            if os.path.exists(fn):
                if fp:
                    fp.close()
                os.remove(fn)

    def testBLOBAsString(self):
        """testing BLOB as string"""
        self.__blob()

    def testBLOBAsPyFile(self):
        """testing BLOB as PyFile"""
        self.__blob(1)

    def __clob(self, asfile=0):
        """Insert a large string as a CLOB, either directly or via a file object."""
        assert self.has_table("clobtable"), "no clob table"
        tabname, sql = self.table("clobtable")
        c = self.cursor()
        try:
            hello = "hello" * 1024 * 10
            c.execute(sql)
            self.db.commit()
            if asfile:
                fp = open(tempfile.mktemp(), "w")
                fp.write(hello)
                fp.flush()
                fp.close()
                obj = open(fp.name, "r")
            else:
                obj = hello
            c.execute("insert into %s (a, b) values (?, ?)" % (tabname), [(0, obj)], {1:zxJDBC.CLOB})
            c.execute("select * from %s" % (tabname), maxrows=1)
            f = c.fetchall()
            assert len(f) == 1, "expected [%d], got [%d]" % (1, len(f))
            assert hello == f[0][1], "clobs are not equal"
        finally:
            c.execute("drop table %s" % (tabname))
            c.close()
            self.db.commit()
            if asfile:
                obj.close()
                os.remove(obj.name)

    def testCLOBAsString(self):
        """testing CLOB as string"""
        self.__clob(0)

    def testCLOBAsPyFile(self):
        """testing CLOB as PyFile"""
        self.__clob(1)
class BCPTestCase(zxJDBCTestCase):
    """Tests for the bulk-copy (BCP) utility and the Pipe source/sink framework."""

    def testCSVPipe(self):
        """testing the CSV pipe"""
        from java.io import PrintWriter, FileWriter
        from com.ziclix.python.sql.pipe import Pipe
        from com.ziclix.python.sql.pipe.db import DBSource
        from com.ziclix.python.sql.pipe.csv import CSVSink
        try:
            src = self.connect()
            fn = tempfile.mktemp(suffix="csv")
            writer = PrintWriter(FileWriter(fn))
            csvSink = CSVSink(writer)
            c = self.cursor()
            try:
                # rows chosen to exercise CSV quoting of commas and quotes
                c.execute("insert into zxtesting (id, name, state) values (?, ?, ?)", [(1000, 'this,has,a,comma', 'and a " quote')])
                c.execute("insert into zxtesting (id, name, state) values (?, ?, ?)", [(1001, 'this,has,a,comma and a "', 'and a " quote')])
                # ORACLE has a problem calling stmt.setObject(index, null)
                c.execute("insert into zxtesting (id, name, state) values (?, ?, ?)", [(1010, '"this,has,a,comma"', None)], {2:zxJDBC.VARCHAR})
                self.db.commit()
            finally:
                self.db.rollback()
                c.close()
            dbSource = DBSource(src, self.datahandler, "zxtesting", None, None, None)
            cnt = Pipe().pipe(dbSource, csvSink) - 1 # ignore the header row
        finally:
            # NOTE(review): if self.connect() or the file setup raises, `writer`,
            # `src` and `fn` are unbound here and this block raises NameError
            writer.close()
            src.close()
            os.remove(fn)

    def _testXMLPipe(self):
        """testing the XML pipe"""
        # leading underscore: not picked up by the test runner (disabled)
        from java.io import PrintWriter, FileWriter
        from com.ziclix.python.sql.pipe import Pipe
        from com.ziclix.python.sql.pipe.db import DBSource
        from com.ziclix.python.sql.pipe.xml import XMLSink
        try:
            src = self.connect()
            fn = tempfile.mktemp(suffix="csv")
            writer = PrintWriter(FileWriter(fn))
            xmlSink = XMLSink(writer)
            dbSource = DBSource(src, self.datahandler, "zxtesting", None, None, None)
            cnt = Pipe().pipe(dbSource, xmlSink) - 1 # ignore the header row
        finally:
            # NOTE(review): same unbound-name hazard as testCSVPipe if setup fails
            writer.close()
            src.close()
            os.remove(fn)

    def testDBPipe(self):
        """testing the DB pipe"""
        from com.ziclix.python.sql.pipe import Pipe
        from com.ziclix.python.sql.pipe.db import DBSource, DBSink
        try:
            src = self.connect()
            dst = self.connect()
            c = self.cursor()
            c.execute("create table zxtestingbcp (id int not null, name varchar(20), state varchar(2), primary key (id))")
            self.db.commit()
            c.execute("select count(*) from zxtesting")
            one = c.fetchone()[0]
            c.close()
            dbSource = DBSource(src, self.datahandler, "zxtesting", None, None, None)
            dbSink = DBSink(dst, self.datahandler, "zxtestingbcp", None, None, 1)
            cnt = Pipe().pipe(dbSource, dbSink) - 1 # ignore the header row
            c = self.cursor()
            c.execute("select count(*) from zxtestingbcp")
            two = c.fetchone()[0]
            c.execute("delete from zxtestingbcp")
            self.db.commit()
            c.close()
            assert one == two, "expected [%d] rows in destination, got [%d] (sql)" % (one, two)
            assert one == cnt, "expected [%d] rows in destination, got [%d] (bcp)" % (one, cnt)
            # this tests the internal assert in BCP. we need to handle the case where we exclude
            # all the rows queried (based on the fact no columns exist) but rows were fetched
            # also make sure (eg, Oracle) that the column name case is ignored
            dbSource = DBSource(src, self.datahandler, "zxtesting", None, ["id"], None)
            dbSink = DBSink(dst, self.datahandler, "zxtestingbcp", ["id"], None, 1)
            self.assertRaises(zxJDBC.Error, Pipe().pipe, dbSource, dbSink)
            # pipe only the rows matching a parameterized where clause
            params = [(4,)]
            dbSource = DBSource(src, self.datahandler, "zxtesting", "id > ?", None, params)
            dbSink = DBSink(dst, self.datahandler, "zxtestingbcp", None, None, 1)
            cnt = Pipe().pipe(dbSource, dbSink) - 1 # ignore the header row
            c = self.cursor()
            c.execute("select count(*) from zxtesting where id > ?", params)
            one = c.fetchone()[0]
            c.execute("select count(*) from zxtestingbcp")
            two = c.fetchone()[0]
            c.close()
            assert one == two, "expected [%d] rows in destination, got [%d] (sql)" % (one, two)
            assert one == cnt, "expected [%d] rows in destination, got [%d] (bcp)" % (one, cnt)
        finally:
            # best-effort cleanup; bare excepts keep teardown from masking failures
            try:
                c = self.cursor()
                try:
                    c.execute("drop table zxtestingbcp")
                    self.db.commit()
                except:
                    self.db.rollback()
            finally:
                c.close()
            try:
                src.close()
            except:
                src = None
            try:
                dst.close()
            except:
                dst = None

    def testBCP(self):
        """testing bcp parameters and functionality"""
        from com.ziclix.python.sql.util import BCP
        import dbexts
        try:
            src = self.connect()
            dst = self.connect()
            c = self.cursor()
            c.execute("create table zxtestingbcp (id int not null, name varchar(20), state varchar(2), primary key (id))")
            self.db.commit()
            c.execute("select count(*) from zxtesting")
            one = c.fetchone()[0]
            c.close()
            b = BCP(src, dst)
            if hasattr(self, "datahandler"):
                b.sourceDataHandler = self.datahandler
                b.destinationDataHandler = self.datahandler
            cnt = b.bcp("zxtesting", toTable="zxtestingbcp")
            c = self.cursor()
            c.execute("select count(*) from zxtestingbcp")
            two = c.fetchone()[0]
            c.execute("delete from zxtestingbcp")
            self.db.commit()
            c.close()
            assert one == two, "expected [%d] rows in destination, got [%d] (sql)" % (one, two)
            assert one == cnt, "expected [%d] rows in destination, got [%d] (bcp)" % (one, cnt)
            # this tests the internal assert in BCP. we need to handle the case where we exclude
            # all the rows queried (based on the fact no columns exist) but rows were fetched
            # also make sure (eg, Oracle) that the column name case is ignored
            self.assertRaises(zxJDBC.Error, b.bcp, "zxtesting", toTable="zxtestingbcp", include=["id"], exclude=["id"])
            # copy only rows matching a parameterized where clause
            params = [(4,)]
            cnt = b.bcp("zxtesting", "id > ?", params, toTable="zxtestingbcp")
            c = self.cursor()
            c.execute("select count(*) from zxtesting where id > ?", params)
            one = c.fetchone()[0]
            c.execute("select count(*) from zxtestingbcp")
            two = c.fetchone()[0]
            c.close()
            assert one == two, "expected [%d] rows in destination, got [%d] (sql)" % (one, two)
            assert one == cnt, "expected [%d] rows in destination, got [%d] (bcp)" % (one, cnt)
        finally:
            # best-effort cleanup mirroring testDBPipe
            try:
                c = self.cursor()
                try:
                    c.execute("drop table zxtestingbcp")
                    self.db.commit()
                except:
                    self.db.rollback()
            finally:
                c.close()
            try:
                src.close()
            except:
                src = None
            try:
                dst.close()
            except:
                dst = None
# (dataset-extraction residue, preserved as a comment so the file stays parseable)
# carvalhomb/tsmells | guess/src/Lib/test/zxjdbc/zxtest.py | Python | gpl-2.0 | 29,195
# -*- coding: utf-8 -*-
"""
Pdb debugger class.
Modified from the standard pdb.Pdb class to avoid including readline, so that
the command line completion of other programs which include this isn't
damaged.
In the future, this class will be expanded with improvements over the standard
pdb.
The code in this file is mainly lifted out of cmd.py in Python 2.2, with minor
changes. Licensing should therefore be under the standard Python terms. For
details on the PSF (Python Software Foundation) standard license, see:
http://www.python.org/2.2.3/license.html"""
#*****************************************************************************
#
# This file is licensed under the PSF license.
#
# Copyright (C) 2001 Python Software Foundation, www.python.org
# Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
#
#
#*****************************************************************************
from __future__ import print_function
import bdb
import functools
import inspect
import sys
from IPython import get_ipython
from IPython.utils import PyColorize, ulinecache
from IPython.utils import coloransi, io, py3compat
from IPython.core.excolors import exception_colors
from IPython.testing.skipdoctest import skip_doctest
# See if we can use pydb.
has_pydb = False
prompt = 'ipdb> '
#We have to check this directly from sys.argv, config struct not yet available
if '--pydb' in sys.argv:
try:
import pydb
if hasattr(pydb.pydb, "runl") and pydb.version>'1.17':
# Version 1.17 is broken, and that's what ships with Ubuntu Edgy, so we
# better protect against it.
has_pydb = True
except ImportError:
print("Pydb (http://bashdb.sourceforge.net/pydb/) does not seem to be available")
if has_pydb:
from pydb import Pdb as OldPdb
prompt = 'ipydb> '
else:
from pdb import Pdb as OldPdb
# Allow the set_trace code to operate outside of an ipython instance, even if
# it does so with some limitations. The rest of this support is implemented in
# the Tracer constructor.
def BdbQuit_excepthook(et, ev, tb, excepthook=None):
    """Exception hook which handles `BdbQuit` exceptions.

    All other exceptions are processed using the `excepthook`
    parameter.
    """
    # BdbQuit just means the user quit the debugger: report and swallow it.
    if et == bdb.BdbQuit:
        print('Exiting Debugger.')
        return
    # Everything else is delegated to the supplied hook when given ...
    if excepthook is not None:
        excepthook(et, ev, tb)
        return
    # Backwards compatibility. Raise deprecation warning?
    BdbQuit_excepthook.excepthook_ori(et, ev, tb)
def BdbQuit_IPython_excepthook(self, et, ev, tb, tb_offset=None):
    """IPython custom-exception handler: just announce that the debugger exited."""
    sys.stdout.write('Exiting Debugger.\n')
class Tracer(object):
    """Class for local debugging, similar to pdb.set_trace.

    Instances of this class, when called, behave like pdb.set_trace, but
    providing IPython's enhanced capabilities.

    This is implemented as a class which must be initialized in your own code
    and not as a standalone function because we need to detect at runtime
    whether IPython is already active or not. That detection is done in the
    constructor, ensuring that this code plays nicely with a running IPython,
    while functioning acceptably (though with limitations) if outside of it.
    """

    @skip_doctest
    def __init__(self, colors=None):
        """Create a local debugger instance.

        Parameters
        ----------
        colors : str, optional
            The name of the color scheme to use, it must be one of IPython's
            valid color schemes. If not given, the function will default to
            the current IPython scheme when running inside IPython, and to
            'NoColor' otherwise.

        Examples
        --------
        ::

            from IPython.core.debugger import Tracer; debug_here = Tracer()

        Later in your code::

            debug_here() # -> will open up the debugger at that point.

        Once the debugger activates, you can use all of its regular commands to
        step through code, set breakpoints, etc. See the pdb documentation
        from the Python standard library for usage details.
        """
        ip = get_ipython()
        if ip is None:
            # Outside of ipython, we set our own exception hook manually
            sys.excepthook = functools.partial(BdbQuit_excepthook,
                                               excepthook=sys.excepthook)
            def_colors = 'NoColor'
            try:
                # Limited tab completion support
                import readline
                readline.parse_and_bind('tab: complete')
            except ImportError:
                pass
        else:
            # In ipython, we use its custom exception handler mechanism
            def_colors = ip.colors
            ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
        if colors is None:
            colors = def_colors
        # The stdlib debugger internally uses a modified repr from the `repr`
        # module, that limits the length of printed strings to a hardcoded
        # limit of 30 characters. That much trimming is too aggressive, let's
        # at least raise that limit to 80 chars, which should be enough for
        # most interactive uses.
        try:
            try:
                from reprlib import aRepr  # Py 3
            except ImportError:
                from repr import aRepr  # Py 2
            aRepr.maxstring = 80
        except:
            # This is only a user-facing convenience, so any error we encounter
            # here can be warned about but can be otherwise ignored. These
            # printouts will tell us about problems if this API changes
            import traceback
            traceback.print_exc()
        # Pdb here is the customized subclass defined later in this module
        self.debugger = Pdb(colors)

    def __call__(self):
        """Starts an interactive debugger at the point where called.

        This is similar to the pdb.set_trace() function from the std lib, but
        using IPython's enhanced debugger."""
        # f_back: start the debugger in the caller's frame, not this method's
        self.debugger.set_trace(sys._getframe().f_back)
def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
    """Make new_fn have old_fn's doc string. This is particularly useful
    for the ``do_...`` commands that hook into the help system.

    Adapted from from a comp.lang.python posting
    by Duncan Booth."""
    def delegate(*args, **kwargs):
        return new_fn(*args, **kwargs)
    original_doc = old_fn.__doc__
    if original_doc:
        delegate.__doc__ = original_doc + additional_text
    return delegate
def _file_lines(fname):
"""Return the contents of a named file as a list of lines.
This function never raises an IOError exception: if the file can't be
read, it simply returns an empty list."""
try:
outfile = open(fname)
except IOError:
return []
else:
out = outfile.readlines()
outfile.close()
return out
class Pdb(OldPdb):
    """IPython-enhanced debugger; does not load readline itself.

    Subclasses whichever base debugger is active (``pdb`` or ``pydb``,
    aliased as ``OldPdb``) and adds color schemes, syntax-highlighted
    source listings, IPython magic lookups (%pdef, %pdoc, ...) and
    completer integration.
    """

    def __init__(self,color_scheme='NoColor',completekey=None,
                 stdin=None, stdout=None, context=5):

        # Parent constructor:
        try:
            # Number of source lines shown around the current line.
            self.context=int(context)
            if self.context <= 0:
                raise ValueError("Context must be a positive integer")
        except (TypeError, ValueError):
            raise ValueError("Context must be a positive integer")

        if has_pydb and completekey is None:
            # NOTE(review): passes the module-level io.stdout rather than
            # the ``stdout`` argument -- looks intentional for the pydb
            # branch, but confirm.
            OldPdb.__init__(self,stdin=stdin,stdout=io.stdout)
        else:
            OldPdb.__init__(self,completekey,stdin,stdout)

        # IPython changes...
        self.is_pydb = has_pydb

        self.shell = get_ipython()

        if self.shell is None:
            # No IPython instance running, we must create one
            from IPython.terminal.interactiveshell import \
                TerminalInteractiveShell
            self.shell = TerminalInteractiveShell.instance()

        if self.is_pydb:

            # interactiveshell.py's ipalias seems to want pdb's checkline
            # which located in pydb.fn
            import pydb.fns
            self.checkline = lambda filename, lineno: \
                pydb.fns.checkline(self, filename, lineno)

            self.curframe = None
            self.do_restart = self.new_do_restart

            # Route completions through this debugger; the original hook is
            # saved so new_do_quit() can restore it on exit.
            self.old_all_completions = self.shell.Completer.all_completions
            self.shell.Completer.all_completions=self.all_completions

            self.do_list = decorate_fn_with_doc(self.list_command_pydb,
                                                OldPdb.do_list)
            self.do_l = self.do_list
            self.do_frame = decorate_fn_with_doc(self.new_do_frame,
                                                 OldPdb.do_frame)

        self.aliases = {}

        # Create color table: we copy the default one from the traceback
        # module and add a few attributes needed for debugging
        self.color_scheme_table = exception_colors()

        # shorthands
        C = coloransi.TermColors
        cst = self.color_scheme_table

        cst['NoColor'].colors.prompt = C.NoColor
        cst['NoColor'].colors.breakpoint_enabled = C.NoColor
        cst['NoColor'].colors.breakpoint_disabled = C.NoColor

        cst['Linux'].colors.prompt = C.Green
        cst['Linux'].colors.breakpoint_enabled = C.LightRed
        cst['Linux'].colors.breakpoint_disabled = C.Red

        cst['LightBG'].colors.prompt = C.Blue
        cst['LightBG'].colors.breakpoint_enabled = C.LightRed
        cst['LightBG'].colors.breakpoint_disabled = C.Red

        self.set_colors(color_scheme)

        # Add a python parser so we can syntax highlight source while
        # debugging.
        self.parser = PyColorize.Parser()

        # Set the prompt
        Colors = cst.active_colors
        # ``prompt`` is presumably a module-level default prompt string
        # defined earlier in this file -- TODO confirm.
        self.prompt = u'%s%s%s' % (Colors.prompt, prompt, Colors.Normal) # The default prompt is '(Pdb)'

    def set_colors(self, scheme):
        """Shorthand access to the color table scheme selector method."""
        self.color_scheme_table.set_active_scheme(scheme)

    def interaction(self, frame, traceback):
        """Run the command loop; Ctrl-C shows the exception summary and
        aborts the loop instead of killing the host process."""
        self.shell.set_completer_frame(frame)
        while True:
            try:
                OldPdb.interaction(self, frame, traceback)
                break
            except KeyboardInterrupt:
                self.shell.write('\n' + self.shell.get_exception_only())
                break
            finally:
                # Pdb sets readline delimiters, so set them back to our own
                if self.shell.readline is not None:
                    self.shell.readline.set_completer_delims(self.shell.readline_delims)

    def new_do_up(self, arg):
        OldPdb.do_up(self, arg)
        # Keep tab-completion bound to the newly selected frame.
        self.shell.set_completer_frame(self.curframe)
    do_u = do_up = decorate_fn_with_doc(new_do_up, OldPdb.do_up)

    def new_do_down(self, arg):
        OldPdb.do_down(self, arg)
        self.shell.set_completer_frame(self.curframe)

    do_d = do_down = decorate_fn_with_doc(new_do_down, OldPdb.do_down)

    def new_do_frame(self, arg):
        OldPdb.do_frame(self, arg)
        self.shell.set_completer_frame(self.curframe)

    def new_do_quit(self, arg):
        # Restore the completion hook hijacked in __init__ (pydb branch).
        if hasattr(self, 'old_all_completions'):
            self.shell.Completer.all_completions=self.old_all_completions

        return OldPdb.do_quit(self, arg)

    do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)

    def new_do_restart(self, arg):
        """Restart command. In the context of ipython this is exactly the same
        thing as 'quit'."""
        self.msg("Restart doesn't make sense here. Using 'quit' instead.")
        return self.do_quit(arg)

    def postloop(self):
        self.shell.set_completer_frame(None)

    def print_stack_trace(self, context=None):
        """Print every frame on the stack with *context* source lines each."""
        if context is None:
            context = self.context
        try:
            context=int(context)
            if context <= 0:
                raise ValueError("Context must be a positive integer")
        except (TypeError, ValueError):
            raise ValueError("Context must be a positive integer")
        try:
            for frame_lineno in self.stack:
                self.print_stack_entry(frame_lineno, context=context)
        except KeyboardInterrupt:
            pass

    def print_stack_entry(self,frame_lineno,prompt_prefix='\n-> ',
                          context=None):
        if context is None:
            context = self.context
        try:
            context=int(context)
            if context <= 0:
                raise ValueError("Context must be a positive integer")
        except (TypeError, ValueError):
            raise ValueError("Context must be a positive integer")
        print(self.format_stack_entry(frame_lineno, '', context), file=io.stdout)

        # vds: >>
        frame, lineno = frame_lineno
        filename = frame.f_code.co_filename
        self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
        # vds: <<

    def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
        """Render one stack entry: header line plus *context* source lines."""
        if context is None:
            context = self.context
        try:
            context=int(context)
            if context <= 0:
                print("Context must be a positive integer")
        except (TypeError, ValueError):
            print("Context must be a positive integer")
        # NOTE(review): unlike print_stack_trace/print_stack_entry this only
        # prints instead of raising and then continues -- a non-numeric
        # ``context`` would still fail later at ``context//2`` below.
        try:
            import reprlib  # Py 3
        except ImportError:
            import repr as reprlib  # Py 2

        ret = []

        Colors = self.color_scheme_table.active_colors
        ColorsNormal = Colors.Normal
        # Two-stage templates: the %%s escapes survive this interpolation
        # (which fills in color codes) and are filled with content later.
        tpl_link = u'%s%%s%s' % (Colors.filenameEm, ColorsNormal)
        tpl_call = u'%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
        tpl_line = u'%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
        tpl_line_em = u'%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
                                             ColorsNormal)

        frame, lineno = frame_lineno

        return_value = ''
        if '__return__' in frame.f_locals:
            rv = frame.f_locals['__return__']
            #return_value += '->'
            return_value += reprlib.repr(rv) + '\n'
        ret.append(return_value)

        #s = filename + '(' + `lineno` + ')'
        filename = self.canonic(frame.f_code.co_filename)
        link = tpl_link % py3compat.cast_unicode(filename)

        if frame.f_code.co_name:
            func = frame.f_code.co_name
        else:
            func = "<lambda>"

        call = ''
        if func != '?':
            if '__args__' in frame.f_locals:
                args = reprlib.repr(frame.f_locals['__args__'])
            else:
                args = '()'
            call = tpl_call % (func, args)

        # The level info should be generated in the same format pdb uses, to
        # avoid breaking the pdbtrack functionality of python-mode in *emacs.
        if frame is self.curframe:
            ret.append('> ')
        else:
            ret.append(' ')
        ret.append(u'%s(%s)%s\n' % (link,lineno,call))

        # Window of source lines centered on ``lineno``, clamped to the file.
        start = lineno - 1 - context//2
        lines = ulinecache.getlines(filename)
        start = min(start, len(lines) - context)
        start = max(start, 0)
        lines = lines[start : start + context]

        for i,line in enumerate(lines):
            show_arrow = (start + 1 + i == lineno)
            linetpl = (frame is self.curframe or show_arrow) \
                      and tpl_line_em \
                      or tpl_line
            ret.append(self.__format_line(linetpl, filename,
                                          start + 1 + i, line,
                                          arrow = show_arrow) )
        return ''.join(ret)

    def __format_line(self, tpl_line, filename, lineno, line, arrow = False):
        """Colorize one source line, prefixing any breakpoint number and an
        optional ``-->`` arrow inside a fixed-width number gutter."""
        bp_mark = ""
        bp_mark_color = ""

        scheme = self.color_scheme_table.active_scheme_name
        new_line, err = self.parser.format2(line, 'str', scheme)
        if not err: line = new_line

        bp = None
        if lineno in self.get_file_breaks(filename):
            bps = self.get_breaks(filename, lineno)
            # Show the most recently added breakpoint on this line.
            bp = bps[-1]

        if bp:
            Colors = self.color_scheme_table.active_colors
            bp_mark = str(bp.number)
            bp_mark_color = Colors.breakpoint_enabled
            if not bp.enabled:
                bp_mark_color = Colors.breakpoint_disabled

        # Gutter width shared by the arrow, breakpoint mark and line number.
        numbers_width = 7
        if arrow:
            # This is the line with the error
            pad = numbers_width - len(str(lineno)) - len(bp_mark)
            if pad >= 3:
                marker = '-'*(pad-3) + '-> '
            elif pad == 2:
                marker = '> '
            elif pad == 1:
                marker = '>'
            else:
                marker = ''
            num = '%s%s' % (marker, str(lineno))
            line = tpl_line % (bp_mark_color + bp_mark, num, line)
        else:
            num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
            line = tpl_line % (bp_mark_color + bp_mark, num, line)

        return line

    def list_command_pydb(self, arg):
        """List command to use if we have a newer pydb installed"""
        filename, first, last = OldPdb.parse_list_cmd(self, arg)
        if filename is not None:
            self.print_list_lines(filename, first, last)

    def print_list_lines(self, filename, first, last):
        """The printing (as opposed to the parsing part of a 'list'
        command."""
        try:
            Colors = self.color_scheme_table.active_colors
            ColorsNormal = Colors.Normal
            tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
            tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
            src = []
            if filename == "<string>" and hasattr(self, "_exec_filename"):
                # Code came from exec'd text; fall back to the recorded name.
                filename = self._exec_filename

            for lineno in range(first, last+1):
                line = ulinecache.getline(filename, lineno)
                if not line:
                    break

                if lineno == self.curframe.f_lineno:
                    line = self.__format_line(tpl_line_em, filename, lineno, line, arrow = True)
                else:
                    line = self.__format_line(tpl_line, filename, lineno, line, arrow = False)

                src.append(line)
                # Remember the last line listed so a bare 'list' continues.
                self.lineno = lineno

            print(''.join(src), file=io.stdout)

        except KeyboardInterrupt:
            pass

    def do_list(self, arg):
        self.lastcmd = 'list'
        last = None
        if arg:
            try:
                # ``list first,last`` or ``list line`` -- evaluated in an
                # empty namespace, so only literals are accepted.
                x = eval(arg, {}, {})
                if type(x) == type(()):
                    first, last = x
                    first = int(first)
                    last = int(last)
                    if last < first:
                        # Assume it's a count
                        last = first + last
                else:
                    first = max(1, int(x) - 5)
            except:
                print('*** Error in argument:', repr(arg))
                return
        elif self.lineno is None:
            first = max(1, self.curframe.f_lineno - 5)
        else:
            first = self.lineno + 1

        if last is None:
            last = first + 10
        self.print_list_lines(self.curframe.f_code.co_filename, first, last)

        # vds: >>
        lineno = first
        filename = self.curframe.f_code.co_filename
        self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
        # vds: <<

    do_l = do_list

    def getsourcelines(self, obj):
        """Return (lines, first_lineno) of the source block for *obj*."""
        lines, lineno = inspect.findsource(obj)
        if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
            # must be a module frame: do not try to cut a block out of it
            return lines, 1
        elif inspect.ismodule(obj):
            return lines, 1
        return inspect.getblock(lines[lineno:]), lineno+1

    def do_longlist(self, arg):
        """Print all source lines of the current frame's function."""
        self.lastcmd = 'longlist'
        filename = self.curframe.f_code.co_filename
        try:
            lines, lineno = self.getsourcelines(self.curframe)
        except OSError as err:
            self.error(err)
            return
        last = lineno + len(lines)
        self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
    do_ll = do_longlist

    def do_pdef(self, arg):
        """Print the call signature for any callable object.

        The debugger interface to %pdef"""
        namespaces = [('Locals', self.curframe.f_locals),
                      ('Globals', self.curframe.f_globals)]
        self.shell.find_line_magic('pdef')(arg, namespaces=namespaces)

    def do_pdoc(self, arg):
        """Print the docstring for an object.

        The debugger interface to %pdoc."""
        namespaces = [('Locals', self.curframe.f_locals),
                      ('Globals', self.curframe.f_globals)]
        self.shell.find_line_magic('pdoc')(arg, namespaces=namespaces)

    def do_pfile(self, arg):
        """Print (or run through pager) the file where an object is defined.

        The debugger interface to %pfile.
        """
        namespaces = [('Locals', self.curframe.f_locals),
                      ('Globals', self.curframe.f_globals)]
        self.shell.find_line_magic('pfile')(arg, namespaces=namespaces)

    def do_pinfo(self, arg):
        """Provide detailed information about an object.

        The debugger interface to %pinfo, i.e., obj?."""
        namespaces = [('Locals', self.curframe.f_locals),
                      ('Globals', self.curframe.f_globals)]
        self.shell.find_line_magic('pinfo')(arg, namespaces=namespaces)

    def do_pinfo2(self, arg):
        """Provide extra detailed information about an object.

        The debugger interface to %pinfo2, i.e., obj??."""
        namespaces = [('Locals', self.curframe.f_locals),
                      ('Globals', self.curframe.f_globals)]
        self.shell.find_line_magic('pinfo2')(arg, namespaces=namespaces)

    def do_psource(self, arg):
        """Print (or run through pager) the source code for an object."""
        namespaces = [('Locals', self.curframe.f_locals),
                      ('Globals', self.curframe.f_globals)]
        self.shell.find_line_magic('psource')(arg, namespaces=namespaces)

    # Python-3-only override adding the optional context-lines argument.
    if sys.version_info > (3, ):
        def do_where(self, arg):
            """w(here)
            Print a stack trace, with the most recent frame at the bottom.
            An arrow indicates the "current frame", which determines the
            context of most commands. 'bt' is an alias for this command.

            Take a number as argument as an (optional) number of context line to
            print"""
            if arg:
                context = int(arg)
                self.print_stack_trace(context)
            else:
                self.print_stack_trace()

        do_w = do_where
| boompieman/iim_project | project_python2/lib/python2.7/site-packages/IPython/core/debugger.py | Python | gpl-3.0 | 22,880 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Auto-generated South schema migration: collapses the three integer
    # ``tema_N`` columns on ActividadEmpresarial into a single ``temas``
    # foreign key, and converts the ``actividad`` foreign key into a
    # many-to-many relation backed by a join table.

    def forwards(self, orm):
        # Deleting field 'ActividadEmpresarial.tema_1'
        db.delete_column(u'politicas_actividadempresarial', 'tema_1')

        # Deleting field 'ActividadEmpresarial.tema_2'
        db.delete_column(u'politicas_actividadempresarial', 'tema_2')

        # Deleting field 'ActividadEmpresarial.actividad'
        db.delete_column(u'politicas_actividadempresarial', 'actividad_id')

        # Deleting field 'ActividadEmpresarial.tema_3'
        db.delete_column(u'politicas_actividadempresarial', 'tema_3')

        # Adding field 'ActividadEmpresarial.temas'
        # NOTE(review): default=1 assumes a TemasIncidencia row with pk=1
        # exists when this migration runs -- verify on fresh databases.
        db.add_column(u'politicas_actividadempresarial', 'temas',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['politicas.TemasIncidencia']),
                      keep_default=False)

        # Adding M2M table for field actividad on 'ActividadEmpresarial'
        m2m_table_name = db.shorten_name(u'politicas_actividadempresarial_actividad')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('actividadempresarial', models.ForeignKey(orm[u'politicas.actividadempresarial'], null=False)),
            ('actividadesespacio', models.ForeignKey(orm[u'politicas.actividadesespacio'], null=False))
        ))
        db.create_unique(m2m_table_name, ['actividadempresarial_id', 'actividadesespacio_id'])

    def backwards(self, orm):
        # Reverse of forwards(): restore the integer columns and the plain
        # FK, then drop the new FK column and the M2M join table.

        # Adding field 'ActividadEmpresarial.tema_1'
        db.add_column(u'politicas_actividadempresarial', 'tema_1',
                      self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'ActividadEmpresarial.tema_2'
        db.add_column(u'politicas_actividadempresarial', 'tema_2',
                      self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'ActividadEmpresarial.actividad'
        db.add_column(u'politicas_actividadempresarial', 'actividad',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['politicas.ActividadesEspacio']),
                      keep_default=False)

        # Adding field 'ActividadEmpresarial.tema_3'
        db.add_column(u'politicas_actividadempresarial', 'tema_3',
                      self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
                      keep_default=False)

        # Deleting field 'ActividadEmpresarial.temas'
        db.delete_column(u'politicas_actividadempresarial', 'temas_id')

        # Removing M2M table for field actividad on 'ActividadEmpresarial'
        db.delete_table(db.shorten_name(u'politicas_actividadempresarial_actividad'))

    # Frozen ORM snapshot used by South for the orm[...] lookups above.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'lugar.departamento': {
            'Meta': {'object_name': 'Departamento'},
            'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
        },
        u'lugar.municipio': {
            'Meta': {'ordering': "['departamento__nombre']", 'object_name': 'Municipio'},
            'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
            'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
        },
        u'lugar.pais': {
            'Meta': {'object_name': 'Pais'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'politicas.actividadempresarial': {
            'Meta': {'object_name': 'ActividadEmpresarial'},
            'actividad': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['politicas.ActividadesEspacio']", 'symmetrical': 'False'}),
            'espacio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['politicas.EspacioInnovacion']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'temas': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['politicas.TemasIncidencia']"})
        },
        u'politicas.actividadesespacio': {
            'Meta': {'object_name': 'ActividadesEspacio'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'politicas.actividadiniciativa': {
            'Meta': {'object_name': 'ActividadIniciativa'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'politicas.espacioinnovacion': {
            'Meta': {'object_name': 'EspacioInnovacion'},
            'activos': ('django.db.models.fields.IntegerField', [], {}),
            'celular_contacto': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'cobertura': ('django.db.models.fields.IntegerField', [], {}),
            'correo_contacto': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'departamento_influye': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['lugar.Departamento']", 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identificador': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'municipios_influye': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['lugar.Municipio']", 'symmetrical': 'False'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'nombre_contacto': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'numero_entidades': ('django.db.models.fields.IntegerField', [], {}),
            'papel': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['politicas.PapelSimas']", 'symmetrical': 'False'}),
            'temas': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['politicas.TemasIncidencia']", 'symmetrical': 'False'}),
            'tiempo_formado': ('django.db.models.fields.IntegerField', [], {}),
            'tipos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['politicas.TipoEspacio']"}),
            'usuario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'zona': ('django.db.models.fields.IntegerField', [], {})
        },
        u'politicas.fotosiniciativa': {
            'Meta': {'object_name': 'FotosIniciativa'},
            'foto': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'iniciativa': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['politicas.IniciativaInnovacion']"}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'politicas.fotosinnovacion': {
            'Meta': {'object_name': 'FotosInnovacion'},
            'espacio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['politicas.EspacioInnovacion']"}),
            'foto': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'politicas.iniciativainnovacion': {
            'Meta': {'object_name': 'IniciativaInnovacion'},
            'acceso_mercado': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'actividades': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['politicas.ActividadIniciativa']", 'null': 'True', 'blank': 'True'}),
            'analisis': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'anio': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'comunicacion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'conservacion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'enfoque': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'espacio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['politicas.EspacioInnovacion']"}),
            'fecha': ('django.db.models.fields.DateField', [], {}),
            'fecha_finalizacion': ('django.db.models.fields.DateField', [], {}),
            'fecha_inicio': ('django.db.models.fields.DateField', [], {}),
            'fomento': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'inversion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'problema': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'reduccion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'resultado': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'sobre_tierra': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'temas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['politicas.TemasAborda']", 'null': 'True', 'blank': 'True'}),
            'tipo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['politicas.TipoIniciativa']"}),
            'usuario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        u'politicas.papelsimas': {
            'Meta': {'object_name': 'PapelSimas'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'politicas.temasaborda': {
            'Meta': {'object_name': 'TemasAborda'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'politicas.temasincidencia': {
            'Meta': {'object_name': 'TemasIncidencia'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'politicas.tipoespacio': {
            'Meta': {'object_name': 'TipoEspacio'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'politicas.tipoiniciativa': {
            'Meta': {'object_name': 'TipoIniciativa'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }
    complete_apps = ['politicas']
#!/usr/bin/python
# updates the copyright information for all .cs files
# usage: call recursive_traversal, with the following parameters
# parent directory, old copyright text content, new copyright text content
import os
#excludedir = ["..\\Lib"]
excludedir = []
def update_source(filename, oldcopyright, copyright):
    """Ensure *filename* begins with *copyright*.

    If the file starts with *oldcopyright*, that header is stripped first.
    A UTF-8 BOM (read back as three chars in text mode) is preserved.
    Fixes over the previous version: file handles are now closed via
    context managers (they were opened with ``file(..., "r+")`` and
    leaked), and the file is only rewritten when its content actually
    changed, so unmodified files keep their timestamps.
    """
    utfstr = chr(0xef)+chr(0xbb)+chr(0xbf)
    with open(filename, "r") as f:
        original = f.read()
    fdata = original
    isUTF = False
    if fdata.startswith(utfstr):
        isUTF = True
        fdata = fdata[3:]
        print("UTF")
    if oldcopyright is not None and fdata.startswith(oldcopyright):
        fdata = fdata[len(oldcopyright):]
    if not fdata.startswith(copyright):
        print("updating " + filename)
        fdata = copyright + fdata
    if isUTF:
        # Re-attach the BOM so the file keeps its original encoding marker.
        fdata = utfstr + fdata
    if fdata != original:
        with open(filename, "w") as f:
            f.write(fdata)
def recursive_traversal(dir, oldcopyright, copyright):
    """Walk *dir* depth-first, updating the copyright header of every
    .java and shader.txt file, skipping paths listed in ``excludedir``."""
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if path in excludedir:
            continue
        if os.path.isdir(path):
            recursive_traversal(path, oldcopyright, copyright)
        elif path.endswith((".java", "shader.txt")):
            update_source(path, oldcopyright, copyright)
#oldcright = file("oldcr.txt","r+").read()
#cright = file("copyright.txt","r+").read()
#recursive_traversal("..", oldcright, cright)
# Shared license notice appended to every author-specific header.
lic = "/* Use of this source code is governed by a BSD-style license that can be found in the LICENSE file */\n"
# Per-author copyright headers: author line followed by the license line.
alex = "/* COPYRIGHT (C) 2012-2013 Alexander Taran. All Rights Reserved. */\n" + lic
vova = "/* COPYRIGHT (C) 2013 Vladimir Losev. All Rights Reserved. */\n" + lic
# Stamp each source tree with its author's header.  No previous header is
# stripped (oldcopyright=None) -- files already stamped are left alone.
recursive_traversal("AndroidOpenGL/src/alex", None, alex)
recursive_traversal("AndroidOpenGL/src/vladimir", None, vova)
recursive_traversal("PicworldTest/src", None, alex)
exit()
| AlexTaran/picworld | copyright.py | Python | bsd-3-clause | 2,007 |
import dataStructures
import logging, os
log = logging.getLogger("tray_item")
log.setLevel(logging.WARN)
class TrayItem:
    """Base class for every item that can live in a tray.

    Each new instance registers itself with the global change-tracking
    list so the application can detect modifications.
    """

    def __init__(self):
        self.selected = False
        self.changed = False
        self.fields = []
        dataStructures.changingItems.append(self)

    def SetSelected(self, value):
        """Record whether this item is currently selected."""
        self.selected = value

    def SetChanged(self, state):
        """Record whether this item has unsaved changes; log when set."""
        self.changed = state
        if state:
            log.debug("TrayItem change registered for %s", self.element)

    def Clone(self):
        """Return a copy: own selection flag and data dict, shared fields."""
        duplicate = TrayItem()
        duplicate.selected = self.selected
        duplicate.data = self.data.copy()
        duplicate.fields = self.fields
        return duplicate
| tschalch/pyTray | src/dataStructures/tray_item.py | Python | bsd-3-clause | 847 |
#
# Copyright (C) 2013 Stefano Sanfilippo
# Copyright (C) 2013 BITS development team
#
# This file is part of bitsd, which is released under the terms of
# GNU GPLv3. See COPYING at top level for more information.
#
from bitsd.persistence.engine import session_scope
from bitsd.persistence.models import Status
from sqlalchemy import asc
class PresenceForecaster:
    """Forecast per-(weekday, time-slot) open frequencies.

    Each day is split into fixed-size slots of ``resolution`` minutes and
    frequencies are derived from the most recent open/closed Status
    samples.
    """

    class InvalidResolutionError(Exception):
        """Raised when the requested resolution cannot tile an hour."""
        def __init__(self):
            self.message = "Resolution must be a submultiple of 60 minutes!"
            # Also hand the text to Exception so str(exc) and logging show
            # it (previously str(exc) was empty).
            Exception.__init__(self, self.message)

    def __init__(self, resolution=30, samples_cont=5000):
        """Create a forecaster.

        ``resolution``: slot size in minutes, must be a positive divisor
        of 60.  ``samples_cont``: number of Status samples to consider.
        Raises InvalidResolutionError for an unusable resolution.
        """
        if self.resolution_is_invalid(resolution):
            raise self.InvalidResolutionError()
        self.samples_count = samples_cont
        # Floor division keeps the tick counts ints on Python 3 as well;
        # resolution divides 60 exactly here, so nothing is lost.
        self.ticks_per_hour = 60 // resolution
        self.minutes_per_tick = resolution

    def forecast(self):
        """Return the normalized per-slot frequencies."""
        #TODO caching
        return self.calculate_frequencies()

    def calculate_frequencies(self):
        samples = self.get_samples()
        buckets = self.count_presence_per_slot(samples)
        return self.normalize(buckets)

    def count_presence_per_slot(self, samples):
        buckets = self.init_buckets()
        # TODO algorithm here
        return buckets

    def init_buckets(self):
        # 7 weekdays x (24 * ticks_per_hour) slots, all zeroed.
        return [[0] * (24 * self.ticks_per_hour) for _ in range(7)]

    def get_samples(self):
        """Fetch the latest open/closed samples, oldest first, skipping
        leading CLOSED entries so the series starts on an OPEN event."""
        with session_scope() as session:
            samples = session \
                .query(Status.timestamp, Status.value) \
                .filter((Status.value == Status.OPEN) | (Status.value == Status.CLOSED)) \
                .order_by(asc(Status.timestamp)) \
                .limit(self.samples_count)
            offset = self.first_open_offset(samples)
            return samples[offset:]

    def first_open_offset(self, samples):
        """Index of the first OPEN sample in *samples*."""
        offset = 0
        while samples[offset].value != Status.OPEN:
            offset += 1
        return offset

    @staticmethod
    def resolution_is_invalid(resolution):
        """True unless *resolution* is a positive divisor of 60.

        The explicit <= 0 guard also prevents a ZeroDivisionError for 0.
        """
        return resolution <= 0 or (60 % resolution) != 0

    def calculate_coordinates(self, sample):
        """Map a sample's timestamp to its (weekday, slot index) bucket."""
        timestamp = sample.timestamp
        weekday = timestamp.weekday()
        timeslot = (self.ticks_per_hour * timestamp.hour) + (timestamp.minute // self.minutes_per_tick)
        return weekday, timeslot

    def normalize(self, buckets):
        """Convert raw per-slot counts into fractions of the sample count
        (mutates and returns *buckets*)."""
        for day in buckets:
            for i, slot in enumerate(day):
                day[i] = 1. * slot / self.samples_count
        return buckets
import urllib.request as u
import bs4 as bs

# Article at The Seattle Times to scrape.
st_url = 'http://www.seattletimes.com/nation-world/obama-starts-2016-with-a-fight-over-gun-control/'

# Fetch the raw HTML of the page.
with u.urlopen(st_url) as response:
    html = response.read()

# Parse the markup with BeautifulSoup's html5lib backend.
soup = bs.BeautifulSoup(html, 'html5lib')

# The story lives inside the <article> element; collect its paragraphs.
paragraphs = soup.find('article').find_all('p')

# Title first, then every paragraph of the story.
body_of_text = [soup.title.get_text()]
print(soup.title)
body_of_text.extend(p.get_text() for p in paragraphs)

# Keep only the article proper, dropping trailing page boilerplate.
body_of_text = body_of_text[:24]

# Show the extracted text and persist it for later chapters.
print('\n'.join(body_of_text))
with open('../../Data/Chapter09/ST_gunLaws.txt', 'w') as f:
    f.write('\n'.join(body_of_text))
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class WagtailStyleGuideAppConfig(AppConfig):
    """Django app configuration for Wagtail's built-in admin style guide."""
    name = "wagtail.contrib.styleguide"
    label = "wagtailstyleguide"
    verbose_name = _("Wagtail style guide")
| wagtail/wagtail | wagtail/contrib/styleguide/apps.py | Python | bsd-3-clause | 252 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-31 11:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen GoldUser.level choices.

    Generated by Django; applied migrations must not be edited by hand.
    """
    dependencies = [
        ('gold', '0002_rename_last_4_digits'),
    ]
    operations = [
        migrations.AlterField(
            model_name='golduser',
            name='level',
            field=models.CharField(choices=[('v1-org-5', '$5/mo'), ('v1-org-10', '$10/mo'), ('v1-org-15', '$15/mo'), ('v1-org-20', '$20/mo'), ('v1-org-50', '$50/mo'), ('v1-org-100', '$100/mo')], default='v1-org-5', max_length=20, verbose_name='Level'),
        ),
    ]
| rtfd/readthedocs.org | readthedocs/gold/migrations/0003_add_missing_model_change_migrations.py | Python | mit | 613 |
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2019 EventGhost Project <http://www.eventghost.org/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import wx
from time import sleep
# Local imports
import eg
from eg.Icons import GetInternalBitmap
from eg.WinApi import (
EnumProcesses,
GetClassName,
GetProcessName,
GetTopLevelWindowList,
GetWindowText,
GetWindowThreadProcessId,
)
from eg.WinApi.Dynamic import (
GA_PARENT,
GA_ROOT,
GetAncestor,
)
from eg.WinApi.Utils import (
GetHwndChildren,
GetHwndIcon,
HighlightWindow,
HwndHasChildren,
)
class WindowTree(wx.TreeCtrl):
    """Tree control listing running programs and their window hierarchy.

    Top level: one node per process that owns top-level windows.
    Children are populated lazily on expansion and discarded on collapse.
    """
    def __init__(self, parent, includeInvisible=False):
        # includeInvisible: also list windows that are currently hidden.
        self.includeInvisible = includeInvisible
        # pid -> list of top-level hwnds owned by that process.
        self.pids = {}
        wx.TreeCtrl.__init__(
            self,
            parent,
            -1,
            style=(
                wx.TR_DEFAULT_STYLE |
                wx.TR_HIDE_ROOT |
                wx.TR_FULL_ROW_HIGHLIGHT
            ),
            size=(-1, 150)
        )
        # Image indices 0-3: generic window, edit, static, button.
        self.imageList = imageList = wx.ImageList(16, 16)
        imageList.Add(GetInternalBitmap("cwindow"))
        imageList.Add(GetInternalBitmap("cedit"))
        imageList.Add(GetInternalBitmap("cstatic"))
        imageList.Add(GetInternalBitmap("cbutton"))
        self.SetImageList(imageList)
        self.root = self.AddRoot("")
        # tree context menu
        def OnCmdHighlight(dummyEvent=None):
            # Flash the selected window ten times so the user can spot it.
            hwnd = self.GetPyData(self.GetSelection())
            for _ in range(10):
                HighlightWindow(hwnd)
                sleep(0.1)
        menu = wx.Menu()
        menuId = wx.NewId()
        menu.Append(menuId, "Highlight")
        self.Bind(wx.EVT_MENU, OnCmdHighlight, id=menuId)
        self.contextMenu = menu
        self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.OnItemRightClick)
        self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.OnItemExpanding)
        self.Bind(wx.EVT_TREE_ITEM_COLLAPSED, self.OnItemCollapsed)
        self.AppendPrograms()
    # __del__ is only defined in debug builds so deletions get logged.
    if eg.debugLevel:
        @eg.LogIt
        def __del__(self):
            pass
    def AppendChildWindows(self, parentHwnd, item):
        """Add one tree node per child window of parentHwnd under item."""
        for hwnd in GetHwndChildren(parentHwnd, self.includeInvisible):
            name = GetWindowText(hwnd)
            className = GetClassName(hwnd)
            if name != "":
                name = "\"" + name + "\" "
            index = self.AppendItem(item, name + className, 0)
            self.SetPyData(index, hwnd)
            # Pick an icon by control class; MDI children get their own icon.
            if className == "Edit" or className == "TEdit":
                self.SetItemImage(index, 1, which=wx.TreeItemIcon_Normal)
            elif className == "Static" or className == "TStaticText":
                self.SetItemImage(index, 2, which=wx.TreeItemIcon_Normal)
            elif className == "Button" or className == "TButton":
                self.SetItemImage(index, 3, which=wx.TreeItemIcon_Normal)
            elif GetClassName(parentHwnd) == "MDIClient":
                icon = GetHwndIcon(hwnd)
                if icon:
                    iconIndex = self.imageList.AddIcon(icon)
                    self.SetItemImage(
                        index,
                        iconIndex,
                        which=wx.TreeItemIcon_Normal
                    )
            if HwndHasChildren(hwnd, self.includeInvisible):
                self.SetItemHasChildren(index, True)
    def AppendPrograms(self):
        """Rebuild the top level: one node per process with visible windows."""
        self.pids.clear()
        processes = EnumProcesses()  # get PID list
        for pid in processes:
            self.pids[pid] = []
        hwnds = GetTopLevelWindowList(self.includeInvisible)
        for hwnd in hwnds:
            pid = GetWindowThreadProcessId(hwnd)[1]
            # Never list EventGhost's own windows.
            if pid == eg.processId:
                continue
            self.pids[pid].append(hwnd)
        for pid in processes:
            if len(self.pids[pid]) == 0:
                continue
            # Use the icon of the first window that has one.
            iconIndex = 0
            for hwnd in self.pids[pid]:
                icon = GetHwndIcon(hwnd)
                if icon:
                    iconIndex = self.imageList.AddIcon(icon)
                    break
            exe = GetProcessName(pid)
            item = self.AppendItem(self.root, exe)
            self.SetItemHasChildren(item, True)
            self.SetPyData(item, pid)
            self.SetItemImage(item, iconIndex, which=wx.TreeItemIcon_Normal)
    def AppendToplevelWindows(self, pid, item):
        """Add the top-level windows of process pid beneath its tree node."""
        hwnds = self.pids[pid]
        for hwnd in hwnds:
            try:
                name = GetWindowText(hwnd)
                className = GetClassName(hwnd)
                icon = GetHwndIcon(hwnd)
            except:
                # Window may have vanished since enumeration; skip it.
                continue
            if name != '':
                name = '"%s"' % name
            iconIndex = 0
            if icon:
                iconIndex = self.imageList.AddIcon(icon)
            newItem = self.AppendItem(item, name)
            self.SetPyData(newItem, hwnd)
            self.SetItemText(newItem, name + className)
            self.SetItemImage(
                newItem,
                iconIndex,
                which=wx.TreeItemIcon_Normal
            )
            if HwndHasChildren(hwnd, self.includeInvisible):
                self.SetItemHasChildren(newItem, True)
    @eg.LogIt
    def Destroy(self):
        # Drop the selection first so no events fire on a dying control.
        self.Unselect()
        self.imageList.Destroy()
        return wx.TreeCtrl.Destroy(self)
    def OnItemCollapsed(self, event):
        """
        Handles wx.EVT_TREE_ITEM_COLLAPSED events.
        """
        # We need to remove all children here, otherwise we'll see all
        # that old rubbish again after the next expansion.
        self.DeleteChildren(event.GetItem())
    def OnItemExpanding(self, event):
        """
        Handles wx.EVT_TREE_ITEM_EXPANDING events.
        """
        item = event.GetItem()
        if self.IsExpanded(item):
            # This event can happen twice in the self.Expand call
            return
        res = self.GetItemParent(item)
        if res == self.root:
            # Expanding a process node: fill in its top-level windows.
            pid = self.GetPyData(item)
            self.AppendToplevelWindows(pid, item)
        else:
            # Expanding a window node: fill in its child windows.
            hwnd = self.GetPyData(item)
            self.AppendChildWindows(hwnd, item)
    def OnItemRightClick(self, dummyEvent):
        """
        Handles wx.EVT_TREE_ITEM_RIGHT_CLICK events.
        """
        self.PopupMenu(self.contextMenu)
    @eg.LogIt
    def Refresh(self):
        """Rebuild the whole tree from a fresh process/window snapshot."""
        self.Freeze()
        self.DeleteChildren(self.root)
        self.AppendPrograms()
        self.Thaw()
    @eg.LogIt
    def SelectHwnd(self, hwnd):
        """Expand the tree down to hwnd and select its node, if found."""
        if hwnd is None:
            self.Unselect()
            return
        _, pid = GetWindowThreadProcessId(hwnd)
        # Find the process node owning this window.
        item, cookie = self.GetFirstChild(self.root)
        while self.GetPyData(item) != pid:
            item, cookie = self.GetNextChild(self.root, cookie)
            if not item.IsOk():
                return
        # Build the ancestor chain from hwnd up to its top-level window.
        chain = [hwnd]
        rootHwnd = GetAncestor(hwnd, GA_ROOT)
        tmp = hwnd
        while tmp != rootHwnd:
            tmp = GetAncestor(tmp, GA_PARENT)
            chain.append(tmp)
        lastItem = item
        # Walk top-down, expanding each level and matching the next hwnd.
        for child in chain[::-1]:
            self.Expand(item)
            item, cookie = self.GetFirstChild(lastItem)
            while self.GetPyData(item) != child:
                item, cookie = self.GetNextChild(lastItem, cookie)
                if not item.IsOk():
                    return
            lastItem = item
        self.SelectItem(lastItem)
| topic2k/EventGhost | eg/Classes/WindowTree.py | Python | gpl-2.0 | 8,073 |
"""Module testing the kale.publisher module."""
from __future__ import absolute_import
import mock
import unittest
from kale import exceptions
from kale import message
from kale import publisher
from kale import settings
from kale import sqs
from kale import test_utils
class PublisherTestCase(unittest.TestCase):
    """Tests for kale.publisher.Publisher."""
    def test_publish(self):
        """Publishing a basic payload succeeds."""
        sqs_inst = sqs.SQSTalk()
        with mock.patch(
                'kale.queue_info.QueueInfo.get_queue') as mock_get_queue:
            mock_queue = mock.MagicMock()
            mock_queue.visibility_timeout_sec = 10
            mock_get_queue.return_value = mock_queue
            mock_publisher = publisher.Publisher(sqs_inst)
            mock_publisher._get_or_create_queue = mock.MagicMock()
            payload = {'args': [], 'kwargs': {}}
            mock_task_class = mock.MagicMock()
            # time_limit must stay below the queue visibility timeout.
            mock_task_class.time_limit = 2
            mock_task_class.__name__ = 'task'
            with mock.patch('kale.message.KaleMessage') as mock_message:
                mock_message.create_message.return_value = mock.MagicMock()
                mock_publisher.publish(mock_task_class, 1, payload)
    def test_publish_with_app_data(self):
        """Publishing succeeds when the payload carries app_data."""
        sqs_inst = sqs.SQSTalk()
        with mock.patch(
                'kale.queue_info.QueueInfo.get_queue') as mock_get_queue:
            mock_queue = mock.MagicMock()
            mock_queue.visibility_timeout_sec = 10
            mock_get_queue.return_value = mock_queue
            mock_publisher = publisher.Publisher(sqs_inst)
            mock_publisher._get_or_create_queue = mock.MagicMock()
            payload = {'args': [], 'kwargs': {}, 'app_data': {}}
            mock_task_class = mock.MagicMock()
            mock_task_class.time_limit = 2
            mock_task_class.__name__ = 'task'
            with mock.patch('kale.message.KaleMessage') as mock_message:
                mock_message.create_message.return_value = mock.MagicMock()
                mock_publisher.publish(mock_task_class, 1, payload)
    def test_publish_messages_to_dead_letter_queue(self):
        """Test publisher to DLQ logic."""
        sqs_inst = sqs.SQSTalk()
        mock_publisher = publisher.Publisher(sqs_inst)
        mock_queue = mock.MagicMock()
        mock_publisher._get_or_create_queue = mock.MagicMock(
            return_value=mock_queue)
        payload = {'args': [], 'kwargs': {}}
        kale_msg = message.KaleMessage(
            task_class=test_utils.MockTask,
            task_id=test_utils.MockTask._get_task_id(),
            payload=payload,
            current_retry_num=5)
        kale_msg.id = 'test-id'
        test_body = 'test-body'
        kale_msg.encode = mock.MagicMock(return_value=test_body)
        mock_messages = [kale_msg]
        with mock.patch.object(mock_queue, 'send_messages') as mock_write:
            mock_publisher.publish_messages_to_dead_letter_queue(
                'dlq_name', mock_messages)
            # The DLQ write must batch the encoded message with no delay.
            expected_args = [{'Id': kale_msg.id, 'MessageBody': test_body, 'DelaySeconds': 0}]
            mock_write.assert_called_once_with(Entries=expected_args)
    def test_publish_bad_time_limit_equal(self):
        """Test publish with bad time limit (equal to timeout)."""
        sqs_inst = sqs.SQSTalk()
        with mock.patch(
                'kale.queue_info.QueueInfo.get_queue') as mock_get_queue:
            mock_queue = mock.MagicMock()
            mock_queue.visibility_timeout_sec = 600
            mock_get_queue.return_value = mock_queue
            mock_publisher = publisher.Publisher(sqs_inst)
            mock_publisher._get_or_create_queue = mock.MagicMock()
            payload = {'args': [], 'kwargs': {}}
            mock_task_class = mock.MagicMock()
            # Equal to the visibility timeout -> must be rejected.
            mock_task_class.time_limit = 600
            with mock.patch('kale.message.KaleMessage') as mock_message:
                mock_message.create_message.return_value = mock.MagicMock()
                with self.assertRaises(
                        exceptions.InvalidTimeLimitTaskException):
                    mock_publisher.publish(mock_task_class, 1, payload)
    def test_publish_bad_time_limit_greater(self):
        """Test publish with bad time limit (greater than timeout)."""
        sqs_inst = sqs.SQSTalk()
        with mock.patch(
                'kale.queue_info.QueueInfo.get_queue') as mock_get_queue:
            mock_queue = mock.MagicMock()
            mock_queue.visibility_timeout_sec = 600
            mock_get_queue.return_value = mock_queue
            mock_publisher = publisher.Publisher(sqs_inst)
            mock_publisher._get_or_create_queue = mock.MagicMock()
            payload = {'args': [], 'kwargs': {}}
            mock_task_class = mock.MagicMock()
            mock_task_class.time_limit = 601
            with mock.patch('kale.message.KaleMessage') as mock_message:
                mock_message.create_message.return_value = mock.MagicMock()
                with self.assertRaises(
                        exceptions.InvalidTimeLimitTaskException):
                    mock_publisher.publish(mock_task_class, 1, payload)
    def test_publish_invalid_delay_sec(self):
        """Test publish with invalid delay_sec value."""
        sqs_inst = sqs.SQSTalk()
        mock_publisher = publisher.Publisher(sqs_inst)
        mock_publisher._get_or_create_queue = mock.MagicMock()
        payload = {'args': [], 'kwargs': {}}
        mock_task_class = mock.MagicMock()
        mock_task_class.time_limit = 2
        # One second past the SQS maximum delay -> must be rejected.
        delay_sec = settings.SQS_MAX_TASK_DELAY_SEC + 1
        with mock.patch('kale.message.KaleMessage') as mock_message:
            mock_message.create_message.return_value = mock.MagicMock()
            with self.assertRaises(exceptions.InvalidTaskDelayException):
                mock_publisher.publish(mock_task_class, 1, payload,
                                       delay_sec=delay_sec)
| Nextdoor/ndkale | kale/tests/test_publisher.py | Python | bsd-2-clause | 5,994 |
#!/usr/bin/env python3
# Arroyo Calle, Adrián
# Crespo Jiménez, Cristina Alejandra
import math
import sys
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Gdk, GdkPixbuf
import cairo
import coche
class GameArea(Gtk.DrawingArea):
    """Drawing area implementing the Rush Hour-style board and drag logic.

    The board is a 6x6 grid; each grid cell maps to 128 "real" pixels in
    car coordinates (car.real_x / car.real_y), scaled to the widget size
    when drawn. level[0] is always the player's car; reaching x == 6,
    y == 3 wins the level.
    """
    def __init__(self):
        super(GameArea, self).__init__()
        self.set_events(Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK | Gdk.EventMask.POINTER_MOTION_MASK)
        self.connect("draw", self.draw)
        self.connect("button-press-event", self.drag_start)
        self.connect("button-release-event", self.drag_end)
        self.connect("motion-notify-event", self.drag)
        self.car = None          # car currently being dragged, if any
        self.x_start = 0         # pointer-to-car offset at drag start
        self.y_start = 0
        self.win = False         # True once the level is solved
        self.movements = 0       # moves made in the current level
        self.n_nivel = 0         # index of the current level
        self.level = niveles[0][:]
        self.set_level(self.level)
        self.asphalt = GdkPixbuf.Pixbuf.new_from_file("data/asphalt.png")
    def draw(self, widget, cr):
        """Render the board, the cars and the HUD, or the victory screen."""
        width = widget.get_window().get_width()
        height = widget.get_window().get_height()
        if not self.win:
            cr.rectangle(0, 0, width, height)
            cr.set_source_rgb(1, 1, 1)
            cr.fill()
            cr.save()
            # Background texture is 512x512; stretch it to the widget.
            cr.scale(float(width)/float(512), float(height)/float(512))
            Gdk.cairo_set_source_pixbuf(cr, self.asphalt, 0, 0)
            cr.paint()
            cr.restore()
            ## PAINT CARS
            for car in self.level:
                cr.save()
                img_width = 256
                img_height = 128
                x_scale = float(width) / float(img_width)
                y_scale = float(height) / float(img_height)
                cr.scale(x_scale/3, y_scale/6)
                # Vertical cars are drawn rotated 90 degrees around
                # their top cell.
                if car.orientation == "V":
                    cr.translate(car.real_x + 128, car.real_y + 128)
                    cr.rotate(math.pi/2)
                    cr.translate(-car.real_x - 128, -car.real_y)
                Gdk.cairo_set_source_pixbuf(cr, car.img, car.real_x, car.real_y)
                cr.paint()
                cr.restore()
            # HUD: level number, record and current move count.
            cr.set_source_rgb(0, 0, 0)
            cr.select_font_face("Monospace", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
            cr.set_font_size(16)
            cr.move_to(10, round(height/8)*7.75)
            try:
                cr.show_text("NIVEL "+str(self.n_nivel + 1)+" RECORD: "+str(records[self.n_nivel])+" MOVIMIENTOS: "+str(int(self.movements)))
            except:
                # No record stored yet for this level.
                cr.show_text("NIVEL "+str(self.n_nivel +1) +" RECORD: No establecido MOVIMIENTOS: "+str(int(self.movements)))
        else:
            # Victory screen.
            cr.rectangle(0, 0, width, height)
            cr.set_source_rgb(0, 0, 1)
            cr.fill()
            cr.set_source_rgb(1, 1, 1)
            cr.select_font_face("Monospace", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
            cr.set_font_size(round(width/8))
            cr.move_to(round(width/8), round(height/2))
            cr.show_text("¡Victoria!")
    def drag_start(self, widget, event):
        """Pick the car under the pointer; on the victory screen, reopen
        the level selector instead."""
        if not self.win:
            width = widget.get_window().get_width()
            height = widget.get_window().get_height()
            u_width = width / 6
            u_height = height / 6
            # Hit-test each car's occupied cells against the click.
            for car in self.level:
                if u_width*(car.x -1) < event.x < u_width*(car.x + ( (car.size -1) if car.orientation == "H" else 0)):
                    if u_height*(car.y -1) < event.y < u_height*(car.y+(( car.size -1) if car.orientation == "V" else 0)):
                        self.car = car
                        self.x_init = car.x
                        self.y_init = car.y
                        self.x_start = self.car.real_x - event.x
                        self.y_start = self.car.real_y - event.y
                        break
        else:
            selector = Selector()
            n_nivel = selector.run()
            selector.destroy()
            self.load_level(n_nivel)
    def drag_end(self, widget, event):
        """Snap the dragged car to the grid, count moves and detect a win."""
        if self.car != None:
            self.car.x = round((self.car.real_x/128)+1)
            self.car.y = round((self.car.real_y/128)+1)
            self.movements += abs(self.x_init - self.car.x) + abs(self.y_init - self.car.y)
            # Player car at the exit cell -> level solved.
            if self.level[0].x == 6 and self.level[0].y == 3:
                self.win = True
                try:
                    if self.movements < records[self.n_nivel]:
                        records[self.n_nivel] = int(self.movements)
                        write_records()
                except:
                    # First completion of this level: create its record.
                    records.append(int(self.movements))
                    write_records()
            self.car = None
            self.set_level(self.level)
            self.queue_draw()
    def drag(self, widget, event):
        """Move the grabbed car along its axis, one free cell at a time."""
        if self.car != None:
            if self.car.orientation == "H":
                self.car.real_x += (event.x - self.car.real_x) + self.x_start
                # Clamp against occupied neighbours; otherwise advance a cell.
                if self.car.real_x > (self.car.x-1)*128:
                    if not self.car.casilla_libre(1, self.level):
                        self.car.real_x = (self.car.x-1)*128
                    else:
                        self.car.x += 1
                if self.car.real_x < (self.car.x-1)*128:
                    if not self.car.casilla_libre(-1, self.level):
                        self.car.real_x = (self.car.x-1)*128
                    else:
                        self.car.x -= 1
            else:
                self.car.real_y += (event.y - self.car.real_y) + self.y_start
                if self.car.real_y > (self.car.y-1)*128:
                    if not self.car.casilla_libre(1, self.level):
                        self.car.real_y = (self.car.y-1)*128
                    else:
                        self.car.y += 1
                if self.car.real_y < (self.car.y-1)*128:
                    if not self.car.casilla_libre(-1, self.level):
                        self.car.real_y = (self.car.y-1)*128
                    else:
                        self.car.y -= 1
            self.queue_draw()
    def set_level(self, level):
        """Sync every car's pixel position with its grid coordinates."""
        self.level = level
        for car in self.level:
            car.real_x = (car.x -1)*128
            car.real_y = (car.y -1)*128
    def load_level(self, n_nivel):
        """Reset state and start level number *n_nivel*."""
        self.level = niveles[n_nivel][:]
        self.set_level(self.level)
        self.n_nivel = n_nivel
        self.movements = 0
        self.win = False
        self.queue_draw()
class Ventana(Gtk.Window):
    """Main application window hosting the GameArea."""
    def __init__(self):
        super(Ventana, self).__init__()
        self.set_title("Anrokku")
        self.set_default_size(500, 500)
        self.game = GameArea()
        self.add(self.game)
        # Closing the window reopens the menu; destroying it quits.
        self.connect("delete-event", self.menu)
        self.connect("destroy", lambda x: sys.exit(0))
        self.show_all()
        self.menu(self)
    def menu(self, widget, data=None):
        """Show the level selector and act on its response code."""
        selector = Selector()
        n_level = selector.run()
        # -1 means "quit", -2 means "stay in the menu", >= 0 is a level.
        if n_level == -1:
            self.destroy()
        selector.destroy()
        if n_level == -2:
            self.menu(self)
        else:
            # NOTE(review): after destroy() this still runs with
            # n_level == -1, loading niveles[-1] -- confirm intended.
            self.game.load_level(n_level)
        return True
class Selector(Gtk.Dialog):
    """Modal level-selection dialog.

    run() returns the chosen level index (>= 0), -1 to quit, or -2 to
    show the menu again.
    """
    def __init__(self):
        super(Selector, self).__init__(flags=Gtk.DialogFlags.MODAL|Gtk.DialogFlags.DESTROY_WITH_PARENT)
        self.set_title("Anrokku - Main Menu")
        self.set_default_size(200, 400)
        img = Gtk.Image()
        img.set_from_file("data/Anrokku.png")
        self.vbox.pack_start(img, False, False, False)
        label = Gtk.Label("Selecciona el nivel")
        self.vbox.pack_start(label, False, False, False)
        combo = Gtk.ComboBoxText.new()
        # Offer every level with a record plus the next unplayed one.
        for i in range(len(records)+1):
            combo.append_text("NIVEL "+str(i+1))
        self.vbox.pack_start(combo, False, False, False)
        combo.connect("changed", self.set)
        salir = Gtk.Button("Salir de Anrokku")
        self.vbox.pack_start(salir, False, False, False)
        salir.connect("clicked", self.confirmar)
        self.connect("delete-event", self.confirmar)
        self.show_all()
    def set(self, widget):
        # Combo selection ends the dialog with the level index as response.
        i = widget.get_active()
        n_level = i
        self.response(n_level)
    def confirmar(self, widget, data=None):
        """Ask for quit confirmation; respond -1 (quit) or -2 (back)."""
        dialog = Gtk.MessageDialog(type=Gtk.MessageType.QUESTION, buttons=Gtk.ButtonsType.YES_NO, flags=Gtk.DialogFlags.MODAL)
        dialog.set_title("Anrokku")
        dialog.set_markup("¿Desea salir de Anrokku?")
        response = dialog.run()
        dialog.destroy()
        if response == Gtk.ResponseType.YES:
            self.response(-1)
        else:
            self.response(-2)
        return True
def write_records():
    """Persist the global ``records`` high-score list, one score per line."""
    with open("records.txt", "w") as handle:
        for best in records:
            handle.write("%s\n" % best)
def read_records():
    """Load saved high scores from records.txt into the global ``records``.

    A missing or malformed file is silently ignored so a fresh install
    simply starts with no records. Fixes the original version, which
    leaked the open file handle and swallowed every exception with a
    bare ``except``.
    """
    try:
        with open("records.txt", "r") as f_records:
            for line in f_records:
                records.append(int(line))
    except (IOError, ValueError):
        pass
# High-score table: one best-movement count per completed level.
records = []
read_records()
# Level definitions (lists of coche.Coche), filled by read_levels() below.
niveles = []
def read_levels():
    """Parse niveles.txt and append each level to the global ``niveles``.

    File format: first line is the number of levels; each level starts
    with its car count, followed by one descriptor line per car. The
    first car of every level is the player's car. Uses ``with`` so the
    file handle is always closed (the original leaked it).
    """
    names = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    with open("niveles.txt", "r") as f_niveles:
        n_niveles = int(f_niveles.readline())
        for _ in range(n_niveles):
            n_coches = int(f_niveles.readline())
            coches = []
            for j in range(n_coches):
                # j == 0 flags the player's car.
                coches.append(coche.Coche(f_niveles.readline(), names[j], j == 0))
            niveles.append(coches[:])
read_levels()
# START: build the main window and enter the GTK event loop.
Ventana()
Gtk.main()
| AdrianArroyoCalle/Anrokku | main.py | Python | mit | 9,607 |
import sys, traceback, Ice

# Minimal Ice lifecycle demo: create a communicator, then make sure it is
# destroyed before exiting, reporting any failure through the exit code.
# The broad except clauses are deliberate: any initialization/teardown
# error is printed and reflected in the process status.
exit_code = 0
communicator = None
try:
    communicator = Ice.initialize(sys.argv)
except:
    traceback.print_exc()
    exit_code = 1
if communicator:
    try:
        communicator.destroy()
    except:
        traceback.print_exc()
        exit_code = 1
sys.exit(exit_code)
| swift1911/SQLAlchemy-tablecache | test/dumps.py | Python | mit | 254 |
# -*- coding: utf-8 -*-
"""
Factornado minimal example
--------------------------
You can run this example in typing:
>>> python minimal.py &
[1] 15539
Then you can test it with:
>>> curl http://localhost:3742/hello
Hello world
If 3742 is the port it's running on.
To end up the process, you can use:
>>> kill -SIGTERM -$(ps aux | grep 'python minimal.py' | awk '{print $2}')
"""
import factornado
import os
from factornado.handlers import Swagger, Log, Heartbeat
from tornado import web
class HelloHandler(factornado.handlers.web.RequestHandler):
    """Trivial handler answering GET /hello with a plain-text greeting."""
    # Swagger spec fragment merged into the service's /swagger.json.
    swagger = {
        "/{name}/{uri}": {
            "get": {
                "description": "Says hello.",
                "parameters": [],
                "responses": {
                    200: {"description": "OK"},
                    401: {"description": "Unauthorized"},
                    403: {"description": "Forbidden"},
                    404: {"description": "Not Found"},
                }
            }
        }
    }
    def get(self):
        """Respond to GET with the greeting."""
        self.write('Hello world\n')
# Configuration file lives next to this module.
config = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    'minimal.yml')
# Route table: greeting endpoint plus the standard service endpoints.
app = factornado.Application(config, [
    ("/hello", HelloHandler),
    ("/swagger.json", Swagger),
    ("/swagger", web.RedirectHandler, {'url': '/swagger.json'}),
    ("/heartbeat", Heartbeat),
    ("/log", Log)
])
if __name__ == "__main__":
    app.start_server()
| factornado/factornado | examples/minimal.py | Python | mit | 1,416 |
#!/usr/bin/env python3
import argparse
import pickle
import math
import numpy as np
import re
import sys
# Command-line interface: a single positional model path, read later via
# parser.parse_args().model in __main__.
parser = argparse.ArgumentParser()
parser.add_argument('model', help='Pickle to read model from')

# Dropped the unused ``model_file = sys.argv[1]`` alias: it was never read
# and crashed with IndexError (before argparse could print its usage
# message) when the script was run without arguments.

# Strips redundant trailing zeros from float.hex() output, e.g.
# '0x1.8000000000000p+0' -> '0x1.8p+0'.
trim_trailing_zeros = re.compile('0+p')
def small_hex(f):
    """Return ``float(f).hex()`` with redundant trailing zeros removed.

    e.g. 1.5 -> '0x1.8p+0' instead of '0x1.8000000000000p+0'.

    Uses ``re.sub`` directly (the module caches compiled patterns) so the
    function is self-contained instead of depending on the module-level
    ``trim_trailing_zeros`` global.
    """
    return re.sub('0+p', 'p', float(f).hex())
def process_column(v, pad):
    """Hex-format every entry of *v*, then right-pad with *pad* hex zeros."""
    formatted = [small_hex(value) for value in v]
    formatted.extend([small_hex(0.0)] * pad)
    return formatted
def cformatM(fh, name, X, nr=None, nc=None):
    """Emit matrix X to *fh* as a C float array plus a scrappie_matrix.

    Columns are padded up to a multiple of 4 floats (SIMD quad stride);
    nr/nc override the logical row/column counts when given.
    """
    nrq = int(math.ceil(X.shape[1] / 4.0))
    pad = nrq * 4 - X.shape[1]
    lines = map(lambda v: ', '.join(process_column(v, pad)), X)
    if nr is None:
        nr = X.shape[1]
    else:
        nrq = int(math.ceil(nr / 4.0))
    if nc is None:
        nc = X.shape[0]
    fh.write('float {}[] = {}\n'.format('__' + name, '{'))
    fh.write('\t' + ',\n\t'.join(lines))
    fh.write('};\n')
    fh.write('_Mat {} = {}\n\t.nr = {},\n\t.nrq = {},\n\t.nc = {},\n\t.stride = {},\n\t.data.f = {}\n{};\n'.format('_' + name, '{', nr, nrq, nc, nrq * 4, '__' + name, '}'))
    fh.write('const scrappie_matrix {} = &{};\n\n'.format(name, '_' + name))
def cformatV(fh, name, X):
    """Emit vector X to *fh* as a C float array plus a 1-column scrappie_matrix.

    Entries are padded up to a multiple of 4 floats (SIMD quad stride).
    """
    nrq = int(math.ceil(X.shape[0] / 4.0))
    pad = nrq * 4 - X.shape[0]
    lines = ', '.join(list(map(lambda f: small_hex(f), X)) + [small_hex(0.0)] * pad)
    fh.write('float {}[] = {}\n'.format( '__' + name, '{'))
    fh.write('\t' + lines)
    fh.write('};\n')
    fh.write('_Mat {} = {}\n\t.nr = {},\n\t.nrq = {},\n\t.nc = {},\n\t.stride = {},\n\t.data.f = {}\n{};\n'.format('_' + name, '{', X.shape[0], nrq, 1, nrq * 4, '__' + name, '}'))
    fh.write('const scrappie_matrix {} = &{};\n\n'.format(name, '_' + name))
if __name__ == '__main__':
    # Convert a pickled Sloika res-GRU model into a C header of
    # scrappie matrices, written to stdout.
    args = parser.parse_args()
    modelid = 'resgru_'
    # NOTE: pickle.load executes arbitrary code from the file --
    # only load model files from a trusted source.
    with open(args.model, 'rb') as fh:
        network = pickle.load(fh, encoding='latin1')
    network_major_version = network.version[0] if isinstance(network.version, tuple) else network.version
    assert network_major_version >= 1, "Sloika model must be version >= 1. Perhaps you need to run Sloika's model_upgrade.py"
    sys.stdout.write("""#pragma once
#ifndef NANONET_RGRGR_{}MODEL_H
#define NANONET_RGRGR_{}MODEL_H
#include <assert.h>
#include "../util.h"
""".format(modelid.upper(), modelid.upper()))
    """ Convolution layer
    """
    filterW = network.sublayers[0].W.get_value()
    nfilter, _ , winlen = filterW.shape
    cformatM(sys.stdout, 'conv_rgrgr_{}W'.format(modelid), filterW.reshape(-1, 1), nr = winlen * 4 - 3, nc=nfilter)
    cformatV(sys.stdout, 'conv_rgrgr_{}b'.format(modelid), network.sublayers[0].b.get_value().reshape(-1))
    sys.stdout.write("const int conv_rgrgr_{}stride = {};\n".format(modelid, network.sublayers[0].stride))
    sys.stdout.write("""const size_t _conv_rgrgr_{}nfilter = {};
const size_t _conv_rgrgr_{}winlen = {};
""".format(modelid, nfilter, modelid, winlen))
    """ Backward GRU (first layer)
    """
    gru1 = network.sublayers[1].sublayers[0].sublayers[0]
    cformatM(sys.stdout, 'gruB1_rgrgr_{}iW'.format(modelid), gru1.iW.get_value())
    cformatM(sys.stdout, 'gruB1_rgrgr_{}sW'.format(modelid), gru1.sW.get_value())
    cformatM(sys.stdout, 'gruB1_rgrgr_{}sW2'.format(modelid), gru1.sW2.get_value())
    cformatV(sys.stdout, 'gruB1_rgrgr_{}b'.format(modelid), gru1.b.get_value().reshape(-1))
    """ Forward GRU (second layer)
    """
    gru2 = network.sublayers[2].sublayers[0]
    cformatM(sys.stdout, 'gruF2_rgrgr_{}iW'.format(modelid), gru2.iW.get_value())
    cformatM(sys.stdout, 'gruF2_rgrgr_{}sW'.format(modelid), gru2.sW.get_value())
    cformatM(sys.stdout, 'gruF2_rgrgr_{}sW2'.format(modelid), gru2.sW2.get_value())
    cformatV(sys.stdout, 'gruF2_rgrgr_{}b'.format(modelid), gru2.b.get_value().reshape(-1))
    """ backward GRU(third layer)
    """
    gru3 = network.sublayers[3].sublayers[0].sublayers[0]
    cformatM(sys.stdout, 'gruB3_rgrgr_{}iW'.format(modelid), gru3.iW.get_value())
    cformatM(sys.stdout, 'gruB3_rgrgr_{}sW'.format(modelid), gru3.sW.get_value())
    cformatM(sys.stdout, 'gruB3_rgrgr_{}sW2'.format(modelid), gru3.sW2.get_value())
    cformatV(sys.stdout, 'gruB3_rgrgr_{}b'.format(modelid), gru3.b.get_value().reshape(-1))
    """ Forward GRU (fourth layer)
    """
    gru4 = network.sublayers[4].sublayers[0]
    cformatM(sys.stdout, 'gruF4_rgrgr_{}iW'.format(modelid), gru4.iW.get_value())
    cformatM(sys.stdout, 'gruF4_rgrgr_{}sW'.format(modelid), gru4.sW.get_value())
    cformatM(sys.stdout, 'gruF4_rgrgr_{}sW2'.format(modelid), gru4.sW2.get_value())
    cformatV(sys.stdout, 'gruF4_rgrgr_{}b'.format(modelid), gru4.b.get_value().reshape(-1))
    """ backward GRU(fifth layer)
    """
    gru5 = network.sublayers[5].sublayers[0].sublayers[0]
    cformatM(sys.stdout, 'gruB5_rgrgr_{}iW'.format(modelid), gru5.iW.get_value())
    cformatM(sys.stdout, 'gruB5_rgrgr_{}sW'.format(modelid), gru5.sW.get_value())
    cformatM(sys.stdout, 'gruB5_rgrgr_{}sW2'.format(modelid), gru5.sW2.get_value())
    cformatV(sys.stdout, 'gruB5_rgrgr_{}b'.format(modelid), gru5.b.get_value().reshape(-1))
    """ Softmax layer
    """
    nstate = network.sublayers[6].W.get_value().shape[0]
    # Rotate states so the stay/blank state comes last.
    shuffle = np.append(np.arange(nstate - 1) + 1, 0)
    cformatM(sys.stdout, 'FF_rgrgr_{}W'.format(modelid), network.sublayers[6].W.get_value()[shuffle])
    cformatV(sys.stdout, 'FF_rgrgr_{}b'.format(modelid), network.sublayers[6].b.get_value()[shuffle])
    sys.stdout.write('#endif /* NANONET_RGRGR_{}_MODEL_H */'.format(modelid.upper()))
| nanoporetech/scrappie | misc/parse_rgrgr_resgru.py | Python | mpl-2.0 | 5,577 |
from smashbox.utilities import *
from smashbox.utilities.hash_files import *
from smashbox.protocol import file_upload, file_download
@add_worker
def main(step):
    """Smoke-test WebDAV upload with If-Match etag preconditions.

    Uploads a 0.3-chunk file, re-uploads with a matching etag (must
    succeed) and with a bogus etag (must fail with 412).
    """
    d = make_workdir()
    reset_owncloud_account()
    URL = oc_webdav_url()
    filename=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(0.3))
    r=file_upload(filename,URL)
    file_download(os.path.basename(filename),URL,d)
    analyse_hashfiles(d)
    # upload again matching the existing etag
    r=file_upload(filename,URL,header_if_match=r.headers['ETag'])
    analyse_hashfiles(d)
    # upload again with a non-matching etag
    r = file_upload(filename,URL,header_if_match='!@# does not exist 123')
    fatal_check(r.rc == 412) # precondition failed
| labkode/smashbox | protocol/test_protocol_simple_upload.py | Python | agpl-3.0 | 725 |
#!/bin/env python
#
# Copyright (c) 1999 - 2010, Vodafone Group Services Ltd
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# * Neither the name of the Vodafone Group Services Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import parser_xml
def print_enum(enum):
    # Dump an enum's name and each of its (name, value) pairs. (Python 2)
    print "Enum: ",enum.name
    for value in enum.values:
        print value[0],"=",value[1]
def print_param(param):
    # One-line summary of a protocol parameter. (Python 2)
    print "id:",param.id,"name:",param.name,"default:",param.default, \
        "description:",param.description
def print_member(member):
    # Dump a struct member's name, type and comment. (Python 2)
    print "Name:",member.name
    print "Type:",member.type
    print "Comment:",member.comment
def print_struct(struct):
    # Dump a struct and all of its members. (Python 2)
    print "Name:",struct.name
    for member in struct.members:
        print_member(member)
def print_request(req):
    # Dump a request/reply: name, version, params, structs and enums.
    print "Name:",req.name,"version:",req.version
    for param in req.params.values():
        print_param(param)
    for struct in req.structs.values():
        print_struct(struct)
    for enum in req.enums.values():
        print_enum(enum)
def print_doc(doc):
    # Dump a whole parsed protocol document section by section.
    print "---- Global Params ----"
    for param in doc.params.values():
        print_param(param)
    print "---- Global Enums ----"
    for enum in doc.enums.values():
        print_enum(enum)
    print "---- Requests ----"
    for request in doc.requests.values():
        print_request(request)
    print "---- Replies ----"
    for reply in doc.replies.values():
        print_request(reply)
# Parse the protocol description and print everything it contains.
doc = parser_xml.parse("protocol.xml")
print_doc(doc)
| wayfinder/Wayfinder-CppCore-v3 | ngplib/generator_print.py | Python | bsd-3-clause | 2,826 |
# pylint: disable=missing-docstring
from __future__ import annotations
from pathlib import Path
from fava.ext import find_extensions
def test_find_extensions() -> None:
    """Exercise fava.ext.find_extensions over failure and success cases."""
    # Nonexistent module -> one import error, no classes.
    classes, errors = find_extensions(".", "NOMODULENAME")
    assert not classes
    assert len(errors) == 1
    assert errors[0].message == 'Importing module "NOMODULENAME" failed.'
    # Importable module without extensions -> dedicated error.
    classes, errors = find_extensions(".", "fava")
    assert not classes
    assert len(errors) == 1
    assert errors[0].message == 'Module "fava" contains no extensions.'
    # Bundled extension modules resolve to their extension class.
    path = Path(__file__).parent.parent / "src" / "fava" / "ext"
    classes, errors = find_extensions(str(path), "auto_commit")
    assert len(classes) == 1
    assert classes[0].__name__ == "AutoCommit"
    assert not errors
    path = Path(__file__).parent.parent / "src" / "fava" / "ext"
    classes, errors = find_extensions(str(path), "portfolio_list")
    assert len(classes) == 1
    assert classes[0].__name__ == "PortfolioList"
    assert not errors
| beancount/fava | tests/test_ext.py | Python | mit | 994 |
import os
from nose.tools import eq_
from moban import file_system
from moban.jinja2.engine import Engine
from moban.jinja2.extensions import jinja_global
from moban.core.moban_factory import MobanEngine
def test_globals():
    """A dict registered via jinja_global is usable from a template."""
    output = "globals.txt"
    test_dict = dict(hello="world")
    jinja_global("test", test_dict)
    path = os.path.join("tests", "fixtures", "globals")
    template_fs = file_system.get_multi_fs([path])
    engine = MobanEngine(template_fs, path, Engine(template_fs))
    engine.render_to_file("basic.template", "basic.yml", output)
    with open(output, "r") as output_file:
        content = output_file.read()
        eq_(content, "world\n\ntest")
    # Clean up the rendered file.
    os.unlink(output)
| chfw/moban | tests/jinja2/test_extensions.py | Python | mit | 704 |
import tensorflow as tf
# Simple TF1-style linear regression fitted by gradient descent.
# training case
x_data = [1., 2., 3., 4.]
y_data = [2., 4., 6., 8.]
# Declare W and b as variables, randomly initialized in [-10000, 10000).
w = tf.Variable(tf.random_uniform([1], -10000., 10000.))
b = tf.Variable(tf.random_uniform([1], -10000., 10000.))
# Hypothesis: the assumed linear model.
hypothesis = w * x_data + b
# Cost function: mean squared error against the training targets.
cost = tf.reduce_mean(tf.square(hypothesis - y_data))
# minimize
a = tf.Variable(0.1) # learning rate, alpha
optimizer = tf.train.GradientDescentOptimizer(a) # Use the GradientDescent algorithm:
# the core step that walks downhill along the cost surface via its gradient.
# With this learning rate, each step moves by a fraction a along the W axis.
training = optimizer.minimize(cost) # Find the minimum-cost parameters.
init = tf.global_variables_initializer() # Initialize variables; must be created before run() is called.
# launch
sess = tf.Session()
sess.run(init) # Run init first, before any training step.
# fit the line
for step in range(2001):
    sess.run(training)
    if step % 20 == 0:
        print("step",step,"cost", sess.run(cost),"w", sess.run(w),"b", sess.run(b))
# learns best fit is w: [1] b: [0]
# -*- coding: utf-8 -*-
import base64
import datetime
import re

from .._globals import IDENTITY
from ..drivers import cx_Oracle
from ..objects import Reference
from .base import BaseAdapter
class OracleAdapter(BaseAdapter):
    """pyDAL adapter for Oracle databases using the cx_Oracle driver.

    Oracle peculiarities handled here:

    * no auto-increment columns: each table gets a sequence plus a
      BEFORE INSERT trigger (``create_sequence_and_triggers``)
    * no LIMIT/OFFSET: pagination is emulated with nested ROWNUM
      subqueries (``select_limitby``)
    * large text values are bound as CLOB parameters instead of being
      inlined in the SQL (``execute`` / ``_insert``)
    * DATE columns store both dates and datetimes; ``after_connection``
      sets the session NLS formats accordingly
    """

    drivers = ('cx_Oracle',)

    # Oracle does not need the commit-after-ALTER-TABLE workaround
    commit_on_alter_table = False
    # pyDAL field type -> Oracle column DDL
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'bigint': 'NUMBER',
        'float': 'FLOAT',
        'double': 'BINARY_DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'NUMBER PRIMARY KEY',
        'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def trigger_name(self, tablename):
        """Return the name of the auto-id trigger for `tablename`."""
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        """Oracle spells LEFT JOIN as LEFT OUTER JOIN."""
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        """SQL expression yielding a random value (used for ORDER BY random)."""
        return 'dbms_random.value'

    def NOT_NULL(self, default, field_type):
        """Oracle requires DEFAULT before NOT NULL in column DDL."""
        return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)

    def REGEXP(self, first, second):
        """Regex match via Oracle's REGEXP_LIKE function."""
        return 'REGEXP_LIKE(%s, %s)' % (self.expand(first),
                                        self.expand(second, 'string'))

    def _drop(self, table, mode):
        """Return the statements that drop `table` and its id sequence."""
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table.sqlsafe, mode),
                'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Build a SELECT, emulating LIMIT/OFFSET with ROWNUM.

        The inner query applies the upper bound (ROWNUM <= lmax) while the
        outer query filters rows below the offset via the aliased w_row.
        """
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                # there is already a WHERE clause: extend it
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        """Shorten constraint names to Oracle's 30-character identifier limit."""
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name) > 30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        """Type-specific SQL literal representation, or None for the default."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            # the :CLOB(...) marker is rewritten into a bind variable by execute()
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T', ' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10] + ' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Prepare the adapter and (optionally) open the connection.

        `uri` has the form ``oracle://<cx_Oracle connect string>``; anything
        after the scheme is passed verbatim to ``cx_Oracle.connect``.
        """
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        self.test_query = 'SELECT 1 FROM DUAL;'
        ruri = uri.split('://', 1)[1]
        if not 'threaded' in driver_args:
            # allow the connection to be shared across threads by default
            driver_args['threaded'] = True
        def connector(uri=ruri, driver_args=driver_args):
            return self.driver.connect(uri, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Normalize the session's date/timestamp formats to ISO style."""
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    # Matches the first :CLOB('...') placeholder that is *outside* quoted
    # string literals (raw string added; pattern content unchanged).
    oracle_fix = re.compile(r"[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command, args=None):
        """Execute `command`, turning :CLOB('...') markers into bind args.

        Each marker becomes a positional bind variable (:1, :2, ...) whose
        value is the unescaped CLOB payload; the trailing ';' is stripped
        because cx_Oracle does not accept it.
        """
        args = args or []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            # strip the CLOB(' ... ') wrapper and undo the '' escaping
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        if command[-1:] == ';':
            command = command[:-1]
        return self.log_execute(command, args)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table plus the sequence/trigger pair that emulates
        an auto-increment id column.

        The trigger also resynchronizes the sequence when a row is inserted
        with an explicit id, so subsequent automatic ids do not collide.
        """
        tablename = table._rname or table._tablename
        id_name = table._id.name
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
        self.execute("""
        CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
        DECLARE
            curr_val NUMBER;
            diff_val NUMBER;
            PRAGMA autonomous_transaction;
        BEGIN
            IF :NEW.%(id)s IS NOT NULL THEN
                EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                diff_val := :NEW.%(id)s - curr_val - 1;
                IF diff_val != 0 THEN
                  EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
                  EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                  EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
                END IF;
            END IF;
            SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL;
        END;
    """ % dict(trigger_name=trigger_name, tablename=tablename,
               sequence_name=sequence_name, id=id_name))

    def lastrowid(self, table):
        """Return the id generated by the last insert into `table`."""
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        # NOTE: 'long' is Python 2 only; this module targets the Python 2
        # web2py/pydal codebase.
        return long(self.cursor.fetchone()[0])

    # NOTE(review): cx_Oracle LOB handles become invalid after a subsequent
    # fetch, which is why _fetchall below reads them eagerly.
    def _fetchall(self):
        """Fetch all rows, eagerly reading any LOB columns into strings."""
        if any(x[1] == cx_Oracle.LOB or x[1] == cx_Oracle.CLOB for x in self.cursor.description):
            return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
                               for c in r]) for r in self.cursor]
        else:
            return self.cursor.fetchall()

    def sqlsafe_table(self, tablename, ot=None):
        """Quote a table name, optionally with its original-name alias."""
        if ot is not None:
            return (self.QUOTE_TEMPLATE + ' ' \
                    + self.QUOTE_TEMPLATE) % (ot, tablename)
        return self.QUOTE_TEMPLATE % tablename

    def _insert(self, table, fields):
        """Build the INSERT statement and its bind-variable dict.

        Returns ``(sql, bind_dict)``; ``bind_dict`` is None/empty unless a
        'text' field requires a CLOB bind variable.
        """
        table_rname = table.sqlsafe
        if fields:
            keys = ','.join(f.sqlsafe_name for f, v in fields)
            r_values = dict()
            def value_man(f, v, r_values):
                # 'text' values go through bind variables so CLOBs of any
                # size can be inserted; everything else is inlined.
                # (fixed: the original compared with "is", which only
                # worked by accident of string interning)
                if f.type == 'text':
                    r_values[':' + f.sqlsafe_name] = self.expand(v, f.type)
                    return ':' + f.sqlsafe_name
                else:
                    return self.expand(v, f.type)
            values = ','.join(value_man(f, v, r_values) for f, v in fields)
            return ('INSERT INTO %s(%s) VALUES (%s);' % (table_rname, keys, values), r_values)
        else:
            return (self._insert_empty(table), None)

    def insert(self, table, fields):
        """Insert a record and return its id (wrapped in a Reference),
        or the primary-key dict for keyed tables.

        On failure, delegates to ``table._on_insert_error`` when defined,
        otherwise re-raises.
        """
        query, values = self._insert(table, fields)
        try:
            if not values:
                self.execute(query)
            else:
                self.execute(query, values)
        except Exception as e:
            # (fixed: the original read the exception via sys.exc_info()
            # without importing sys; 'except ... as e' is equivalent)
            if hasattr(table, '_on_insert_error'):
                return table._on_insert_error(table, fields, e)
            raise e
        if hasattr(table, '_primarykey'):
            mydict = dict([(k[0].name, k[1]) for k in fields if k[0].name in table._primarykey])
            if mydict != {}:
                return mydict
        id = self.lastrowid(table)
        if hasattr(table, '_primarykey') and len(table._primarykey) == 1:
            id = {table._primarykey[0]: id}
        if not isinstance(id, (int, long)):
            return id
        # Reference is imported from ..objects at module level
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid
| nonnib/eve-metrics | web2py/gluon/packages/dal/pydal/adapters/oracle.py | Python | mit | 9,812 |
#!/usr/bin/python
# Turn the display off via DPMS and keep it off until the next user
# activity (key press, button press, or pointer motion).
import time
import subprocess
from Xlib import X
from Xlib.display import Display
display = Display()
root = display.screen().root
# Grab the pointer and keyboard so the first input event is delivered to
# this client (waking it) instead of to the focused application.
root.grab_pointer(True,
    X.ButtonPressMask | X.ButtonReleaseMask | X.PointerMotionMask,
    X.GrabModeAsync, X.GrabModeAsync, 0, 0, X.CurrentTime)
root.grab_keyboard(True,
    X.GrabModeAsync, X.GrabModeAsync, X.CurrentTime)
subprocess.call('xset dpms force off'.split())
# Give the monitor a moment to power down before waiting for input.
time.sleep(1)
# Block until the first input event; the script then exits, the grabs
# are released, and the display wakes as usual.
display.next_event()
| mondalaci/dpms-force-off-until-activity | nxmehta-xset-dpms-force-off.optimized.py | Python | gpl-3.0 | 474 |
from swpy.dscovr.clients import DscovrClient, DscovrRTClient
from swpy.dscovr.graph import DscovrGraph | jongyeob/swpy | swpy/dscovr/__init__.py | Python | gpl-2.0 | 102 |
"""XML-RPC Servers.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
The Doc* classes can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the sys functions available through sys.func_name
import sys
self.sys = sys
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the sys methods
return list_public_methods(self) + \
['sys.' + method for method in list_public_methods(self.sys)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise 'bad method'
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
from xmlrpc.client import Fault, dumps, loads
from http.server import BaseHTTPRequestHandler
import http.server
import socketserver
import sys
import os
import re
import pydoc
import inspect
import traceback
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Walk *attr* (optionally dot-separated) starting from *obj* and return
    the attribute it names.  Any path segment beginning with an underscore
    is treated as private and raises AttributeError.

    When *allow_dotted_names* is false, dots are not split out and the
    call behaves like a guarded getattr(obj, attr).
    """
    segments = attr.split('.') if allow_dotted_names else [attr]
    for segment in segments:
        if segment.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % segment
                )
        obj = getattr(obj, segment)
    return obj
def list_public_methods(obj):
    """Return the names of *obj*'s public callable attributes.

    An attribute is public when its name does not start with an
    underscore; callability is detected via the __call__ protocol.
    """
    public = []
    for name in dir(obj):
        if name.startswith('_'):
            continue
        if hasattr(getattr(obj, name), '__call__'):
            public.append(name)
    return public
class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.
    This class is used to register XML-RPC method handlers
    and then to dispatch them. There should never be any
    reason to instantiate this class directly.
    """
    def __init__(self, allow_none=False, encoding=None):
        # funcs maps exported method names to callables; instance is an
        # optional fallback object searched when a name is not in funcs.
        self.funcs = {}
        self.instance = None
        self.allow_none = allow_none
        self.encoding = encoding or 'utf-8'
    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.
        Only one instance can be installed at a time.
        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))
        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.
        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.
        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.
            *** SECURITY WARNING: ***
            Enabling the allow_dotted_names options allows intruders
            to access your module's global variables and may allow
            intruders to execute arbitrary code on your machine.  Only
            use this option on a secure, closed network.
        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names
    def register_function(self, function, name = None):
        """Registers a function to respond to XML-RPC requests.
        The optional name argument can be used to set a Unicode name
        for the function.
        """
        # default the exported name to the function's own __name__
        if name is None:
            name = function.__name__
        self.funcs[name] = function
    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.
        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs.update({'system.listMethods' : self.system_listMethods,
                      'system.methodSignature' : self.system_methodSignature,
                      'system.methodHelp' : self.system_methodHelp})
    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.
        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs.update({'system.multicall' : self.system_multicall})
    def _marshaled_dispatch(self, data, dispatch_method = None):
        """Dispatches an XML-RPC method from marshalled (XML) data.
        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the prefered means
        of changing method dispatch behavior.
        """
        try:
            params, method = loads(data)
            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = dumps(response, methodresponse=1,
                             allow_none=self.allow_none, encoding=self.encoding)
        except Fault as fault:
            # a Fault raised by the handler is marshalled as-is
            response = dumps(fault, allow_none=self.allow_none,
                             encoding=self.encoding)
        except:
            # report exception back to server
            exc_type, exc_value, exc_tb = sys.exc_info()
            response = dumps(
                Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none,
                )
        # callers expect bytes ready to be written to the transport
        return response.encode(self.encoding)
    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']
        Returns a list of the methods supported by the server."""
        methods = set(self.funcs.keys())
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods |= set(self.instance._listMethods())
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods |= set(list_public_methods(self.instance))
        return sorted(methods)
    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]
        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.
        This server does NOT support system.methodSignature."""
        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
        return 'signatures not supported'
    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"
        Returns a string containing documentation for the specified method."""
        method = None
        if method_name in self.funcs:
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                                self.instance,
                                method_name,
                                self.allow_dotted_names
                                )
                except AttributeError:
                    pass
        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            import pydoc
            return pydoc.getdoc(method)
    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]
        Allows the caller to package multiple XML-RPC calls into a single
        request.
        See http://www.xmlrpc.com/discuss/msgReader$1208
        """
        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']
            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                results.append([self._dispatch(method_name, params)])
            except Fault as fault:
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                # non-Fault errors are reported with a generic faultCode of 1
                exc_type, exc_value, exc_tb = sys.exc_info()
                results.append(
                    {'faultCode' : 1,
                     'faultString' : "%s:%s" % (exc_type, exc_value)}
                    )
        return results
    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.
        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.
        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))
        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.
        Methods beginning with an '_' are considered private and will
        not be called.
        """
        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.instance,
                            method,
                            self.allow_dotted_names
                            )
                    except AttributeError:
                        pass
        if func is not None:
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.
    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """
    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')
    def is_rpc_path_valid(self):
        # True when self.path is allowed to receive XML-RPC requests
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True
    def do_POST(self):
        """Handles the HTTP POST request.
        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                size_remaining -= len(L[-1])
            data = b''.join(L)
            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                    data, getattr(self, '_dispatch', None)
                )
        except Exception as e: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)
            # Send information about the exception if requested
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                self.send_header("X-traceback", traceback.format_exc())
            self.end_headers()
        else:
            # normal path: serialized XML-RPC response in the body
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)
            # shut down the connection
            self.wfile.flush()
            self.connection.shutdown(1)
    def report_404 (self):
        # Report a 404 error
        self.send_response(404)
        response = b'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
        # shut down the connection
        self.wfile.flush()
        self.connection.shutdown(1)
    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""
        # honors the server's logRequests flag
        if self.server.logRequests:
            BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(socketserver.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.
    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inhereted
    from SimpleXMLRPCDispatcher to change this behavior.
    """
    allow_reuse_address = True
    # Warning: this is for debugging purposes only! Never set this to True in
    # production code, as will be sending out sensitive information (exception
    # and stack trace details) when exceptions are raised inside
    # SimpleXMLRPCRequestHandler.do_POST
    _send_traceback_header = False
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):
        self.logRequests = logRequests
        # initialize the dispatcher first, then bind the TCP socket
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)
        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""
    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""
        response = self._marshaled_dispatch(request_text)
        # CGI protocol: headers on stdout (text), then the raw body bytes
        print('Content-Type: text/xml')
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()
    def handle_get(self):
        """Handle a single HTTP GET request.
        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """
        code = 400
        message, explain = BaseHTTPRequestHandler.responses[code]
        response = http.server.DEFAULT_ERROR_MESSAGE % \
            {
             'code' : code,
             'message' : message,
             'explain' : explain
            }
        response = response.encode('utf-8')
        print('Status: %d %s' % (code, message))
        print('Content-Type: %s' % http.server.DEFAULT_ERROR_CONTENT_TYPE)
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()
    def handle_request(self, request_text = None):
        """Handle a single XML-RPC request passed through a CGI post method.
        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """
        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (ValueError, TypeError):
                # missing/invalid CONTENT_LENGTH: read until EOF
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)
            self.handle_xmlrpc(request_text)
# -----------------------------------------------------------------------------
# Self documenting XML-RPC Server.
class ServerHTMLDoc(pydoc.HTMLDoc):
    """Class used to generate pydoc HTML document for a server"""
    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0
        # XXX Note that this regular expression does not allow for the
        # hyperlinking of arbitrary strings being used as method
        # names. Only methods with names consisting of word characters
        # and '.'s are hyperlinked.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?((?:\w|\.)+))\b')
        while 1:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            # escape and emit the plain text before this match
            results.append(escape(text[here:start]))
            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                # a name followed by '(' looks like a call: link it
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return ''.join(results)
    def docroutine(self, object, name, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        title = '<a name="%s"><strong>%s</strong></a>' % (
            self.escape(anchor), self.escape(name))
        # NOTE(review): inspect.getargspec/formatargspec are deprecated in
        # modern Pythons (removed in 3.11); fine for the 3.1-era stdlib.
        if inspect.ismethod(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            # exclude the argument bound to the instance, it will be
            # confusing to the non-Python user
            argspec = inspect.formatargspec (
                    args[1:],
                    varargs,
                    varkw,
                    defaults,
                    formatvalue=self.formatvalue
                )
        elif inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
        else:
            argspec = '(...)'
        if isinstance(object, tuple):
            # (argstring, docstring) pair supplied by XMLRPCDocGenerator
            argspec = object[0] or argspec
            docstring = object[1] or ""
        else:
            docstring = pydoc.getdoc(object)
        decl = title + argspec + (note and self.grey(
               '<font face="helvetica, arial">%s</font>' % note))
        doc = self.markup(
            docstring, self.preformat, funcs, classes, methods)
        doc = doc and '<dd><tt>%s</tt></dd>' % doc
        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
    def docserver(self, server_name, package_documentation, methods):
        """Produce HTML documentation for an XML-RPC server."""
        fdict = {}
        for key, value in methods.items():
            fdict[key] = '#-' + key
            fdict[value] = fdict[key]
        server_name = self.escape(server_name)
        head = '<big><big><strong>%s</strong></big></big>' % server_name
        result = self.heading(head, '#ffffff', '#7799ee')
        doc = self.markup(package_documentation, self.preformat, fdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc
        contents = []
        # one <dl> section per method, in sorted name order
        method_items = sorted(methods.items())
        for key, value in method_items:
            contents.append(self.docroutine(value, key, funcs=fdict))
        result = result + self.bigsection(
            'Methods', '#ffffff', '#eeaa77', ''.join(contents))
        return result
class XMLRPCDocGenerator:
    """Generates documentation for an XML-RPC server.
    This class is designed as mix-in and should not
    be constructed directly.
    """
    def __init__(self):
        # setup variables used for HTML documentation
        self.server_name = 'XML-RPC Server Documentation'
        self.server_documentation = \
            "This server exports the following methods through the XML-RPC "\
            "protocol."
        self.server_title = 'XML-RPC Server Documentation'
    def set_server_title(self, server_title):
        """Set the HTML title of the generated server documentation"""
        self.server_title = server_title
    def set_server_name(self, server_name):
        """Set the name of the generated HTML server documentation"""
        self.server_name = server_name
    def set_server_documentation(self, server_documentation):
        """Set the documentation string for the entire server."""
        self.server_documentation = server_documentation
    def generate_html_documentation(self):
        """generate_html_documentation() => html documentation for the server
        Generates HTML documentation for the server using introspection for
        installed functions and instances that do not implement the
        _dispatch method. Alternatively, instances can choose to implement
        the _get_method_argstring(method_name) method to provide the
        argument string used in the documentation and the
        _methodHelp(method_name) method to provide the help text used
        in the documentation."""
        methods = {}
        # relies on the SimpleXMLRPCDispatcher mix-in for self.funcs,
        # self.instance and system_listMethods
        for method_name in self.system_listMethods():
            if method_name in self.funcs:
                method = self.funcs[method_name]
            elif self.instance is not None:
                method_info = [None, None] # argspec, documentation
                if hasattr(self.instance, '_get_method_argstring'):
                    method_info[0] = self.instance._get_method_argstring(method_name)
                if hasattr(self.instance, '_methodHelp'):
                    method_info[1] = self.instance._methodHelp(method_name)
                method_info = tuple(method_info)
                if method_info != (None, None):
                    method = method_info
                elif not hasattr(self.instance, '_dispatch'):
                    try:
                        method = resolve_dotted_attribute(
                                    self.instance,
                                    method_name
                                    )
                    except AttributeError:
                        method = method_info
                else:
                    method = method_info
            else:
                assert 0, "Could not find method in self.functions and no "\
                          "instance installed"
            methods[method_name] = method
        documenter = ServerHTMLDoc()
        documentation = documenter.docserver(
                                self.server_name,
                                self.server_documentation,
                                methods
                            )
        return documenter.page(self.server_title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """XML-RPC and documentation request handler class.
    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    Handles all HTTP GET requests and interprets them as requests
    for documentation.
    """
    def do_GET(self):
        """Handles the HTTP GET request.
        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        # the server mixes in XMLRPCDocGenerator, which provides this
        response = self.server.generate_html_documentation().encode('utf-8')
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
        # shut down the connection
        self.wfile.flush()
        self.connection.shutdown(1)
class DocXMLRPCServer(  SimpleXMLRPCServer,
                        XMLRPCDocGenerator):
    """XML-RPC and HTML documentation server.
    Adds the ability to serve server documentation to the capabilities
    of SimpleXMLRPCServer.
    """
    def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
                 logRequests=1, allow_none=False, encoding=None,
                 bind_and_activate=True):
        # Initialize both bases explicitly; XMLRPCDocGenerator has no
        # constructor arguments of its own.
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate)
        XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler(CGIXMLRPCRequestHandler,
                                 XMLRPCDocGenerator):
    """CGI handler that serves XML-RPC requests and, for HTTP GET,
    the generated HTML documentation."""

    def __init__(self):
        CGIXMLRPCRequestHandler.__init__(self)
        XMLRPCDocGenerator.__init__(self)

    def handle_get(self):
        """Handles the HTTP GET request.
        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        response = self.generate_html_documentation().encode('utf-8')
        # Emit CGI headers on stdout, then the raw UTF-8 body on the
        # underlying binary buffer.
        print('Content-Type: text/html')
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()
if __name__ == '__main__':
    # Demo server: exposes the built-in pow() and an 'add' function.
    print('Running XML-RPC server on port 8000')
    demo_server = SimpleXMLRPCServer(("localhost", 8000))
    demo_server.register_function(pow)
    demo_server.register_function(lambda x, y: x + y, 'add')
    demo_server.serve_forever()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.1/Lib/xmlrpc/server.py | Python | mit | 32,062 |
import os, sys
import commands
import optparse
import shutil
# Sub-directory (under the SIP/module install roots) for this package.
INSTALL_DIR = ""
# Directory containing this configure script.
BASE_DIR = os.path.dirname(__file__)
# Top-level SIP specification for the bindings.
SIP_FILE = "poppler-qt4.sip"
# Where sip-generated sources and Makefiles are written.
BUILD_DIR = "build"
# SIP build file (.sbf) produced by sip and consumed by sipconfig.
SBF_FILE = "QtPoppler.sbf"
def _cleanup_path(path):
"""
Cleans the path:
- Removes traling / or \
"""
path = path.rstrip('/')
path = path.rstrip('\\')
return path
def pkgconfig(package):
    '''
    Calls pkg-config for the given package
    Returns: - None if the package is not found.
             - {'inc_dirs': [List of -I Paths]
                'lib_dirs': [List of -L Paths]
                'libs':     [List of -l libs]
               }
    '''
    code, msg = commands.getstatusoutput("pkg-config --exists %s" % package)
    if code != 0:
        return None
    # Classify every flag emitted by pkg-config by its two-character prefix.
    flags = {'inc_dirs': [], 'lib_dirs': [], 'libs': []}
    prefix_to_key = {'-I': 'inc_dirs', '-L': 'lib_dirs', '-l': 'libs'}
    for token in commands.getoutput(
            "pkg-config --libs --cflags %s" % package).split():
        key = prefix_to_key.get(token[:2])
        if key is not None:
            flags[key].append(token[2:])
    return flags
def create_optparser(sipcfg):
    '''Comandline parser'''
    # Callback that normalises user-supplied paths to absolute form.
    def store_abspath(option, opt_str, value, parser):
        setattr(parser.values, option.dest, os.path.abspath(value))
    # Default module install location, derived from the SIP configuration.
    def get_default_moddir():
        default = sipcfg.default_mod_dir
        default = os.path.join(default, INSTALL_DIR)
        return default
    p = optparse.OptionParser(usage="%prog [options]")
    default_moddir = get_default_moddir()
    p.add_option("-d", "--destdir", action="callback",
                 default=default_moddir, type="string",
                 metavar="DIR",
                 dest="moddir", callback=store_abspath,
                 help="Where to install PyPoppler-Qt4 python modules."
                 "[default: %default]")
    p.add_option("-s", "--sipdir", action="callback",
                 default=os.path.join(sipcfg.default_sip_dir, INSTALL_DIR),
                 metavar="DIR", dest="sipdir", callback=store_abspath,
                 type="string", help="Where the .sip files will be installed "
                 "[default: %default]")
    # The two poppler options default to None so that pkg-config detection
    # can be used as the fallback (see get_popplerqt_config).
    p.add_option("", "--popplerqt-includes-dir", action="callback",
                 default=None,
                 metavar="DIR", dest="popplerqt_inc_dirs", callback=store_abspath,
                 type="string", help="PopplerQt include paths"
                 "[default: Auto-detected with pkg-config]")
    p.add_option("", "--popplerqt-libs-dir", action="callback",
                 default=None,
                 metavar="DIR", dest="popplerqt_lib_dirs", callback=store_abspath,
                 type="string", help="PopplerQt libraries paths"
                 "[default: Auto-detected with pkg-config]")
    return p
def get_pyqt4_config():
    """Return the PyQt4 build Configuration; exit(1) if PyQt4 is missing."""
    try:
        import PyQt4.pyqtconfig
        return PyQt4.pyqtconfig.Configuration()
    except ImportError, e:
        print >> sys.stderr, "ERROR: PyQt4 not found."
        sys.exit(1)
def get_sip_config():
    """Return the SIP build Configuration; exit(1) if sipconfig is missing."""
    try:
        import sipconfig
        return sipconfig.Configuration()
    except ImportError, e:
        print >> sys.stderr, "ERROR: SIP (sipconfig) not found."
        sys.exit(1)
def get_popplerqt_config(opts):
config = pkgconfig('poppler-qt4')
if config is not None:
found_pkgconfig = True
else:
found_pkgconfig = False
config = {'libs': ['poppler-qt4', 'poppler'],
'inc_dirs': None,
'lib_dirs': None}
if opts.popplerqt_inc_dirs is not None:
config['inc_dirs'] = opts.popplerqt_inc_dirs.split(" ")
if opts.popplerqt_lib_dirs is not None:
config['lib_dirs'] = opts.popplerqt_lib_dirs.split(" ")
if config['lib_dirs'] is None or config['inc_dirs'] is None:
print >> sys.stderr, "ERROR: poppler-qt4 not found."
print "Try to define PKG_CONFIG_PATH "
print "or use --popplerqt-libs-dir and --popplerqt-includes-dir options"
sys.exit(1)
config['inc_dirs'] = map(_cleanup_path, config['inc_dirs'])
config['lib_dirs'] = map(_cleanup_path, config['lib_dirs'])
config['sip_dir'] = _cleanup_path(opts.sipdir)
config['mod_dir'] = _cleanup_path(opts.moddir)
print "Using PopplerQt include paths: %s" % config['inc_dirs']
print "Using PopplerQt libraries paths: %s" % config['lib_dirs']
print "Configured to install SIP in %s" % config['sip_dir']
print "Configured to install binaries in %s" % config['mod_dir']
return config
def create_build_dir():
    """Create the sip build directory if it does not already exist;
    exit(1) if it cannot be created."""
    dir = os.path.join(BASE_DIR, BUILD_DIR)
    if os.path.exists(dir):
        return
    try:
        os.mkdir(dir)
    except OSError:
        # Narrowed from a bare except: only filesystem failures are
        # expected here, and a bare except would also hide SystemExit etc.
        print >> sys.stderr, "ERROR: Unable to create the build directory (%s)" % dir
        sys.exit(1)
def run_sip(pyqtcfg):
    """Run the sip code generator over SIP_FILE into the build directory."""
    create_build_dir()
    # -c: generated C++ output dir, -b: build file, -I: PyQt4 .sip imports.
    cmd = [pyqtcfg.sip_bin,
        "-c", os.path.join(BASE_DIR, BUILD_DIR),
        "-b", os.path.join(BUILD_DIR, SBF_FILE),
        "-I", pyqtcfg.pyqt_sip_dir,
        pyqtcfg.pyqt_sip_flags,
        os.path.join(BASE_DIR, SIP_FILE)]
    os.system( " ".join(cmd) )
def generate_makefiles(pyqtcfg, popplerqtcfg, opts):
    """Write the module Makefile (in BUILD_DIR) and the parent Makefile.

    The parent Makefile also installs the .sip file and the generated
    pypopplerqt4config.py module.
    """
    from PyQt4 import pyqtconfig
    import sipconfig
    pypopplerqt4config_file = os.path.join(BASE_DIR, "pypopplerqt4config.py")
    # Creeates the Makefiles objects for the build directory
    makefile_build = pyqtconfig.sipconfig.ModuleMakefile(
        configuration=pyqtcfg,
        build_file=SBF_FILE,
        dir=BUILD_DIR,
        install_dir=popplerqtcfg['mod_dir'],
        warnings=1,
        qt=['QtCore', 'QtGui', 'QtXml']
    )
    # Add extras dependencies for the compiler and the linker
    # Libraries names don't include any platform specific prefixes
    # or extensions (e.g. the "lib" prefix on UNIX, or the ".dll" extension on Windows)
    makefile_build.extra_lib_dirs = popplerqtcfg['lib_dirs']
    makefile_build.extra_libs = popplerqtcfg['libs']
    makefile_build.extra_include_dirs = popplerqtcfg['inc_dirs']
    # Generates build Makefile
    makefile_build.generate()
    # Generates root Makefile
    installs_root = []
    installs_root.append( (os.path.join(BASE_DIR, SIP_FILE), popplerqtcfg['sip_dir']) )
    installs_root.append( (pypopplerqt4config_file, popplerqtcfg['mod_dir']) )
    sipconfig.ParentMakefile(
        configuration=pyqtcfg,
        subdirs=[_cleanup_path(BUILD_DIR)],
        installs=installs_root
    ).generate()
def generate_configuration_module(pyqtcfg, popplerqtcfg, opts):
    """Render pypopplerqt4config.py from its .in template so downstream
    projects can discover how these bindings were built."""
    import sipconfig
    content = {
        "pypopplerqt4_sip_dir":    popplerqtcfg['sip_dir'],
        "pypopplerqt4_sip_flags":  pyqtcfg.pyqt_sip_flags,
        "pypopplerqt4_mod_dir":    popplerqtcfg['mod_dir'],
        "pypopplerqt4_modules":    'PopplerQt',
        "popplerqt4_inc_dirs":     popplerqtcfg['inc_dirs'],
        "popplerqt4_lib_dirs":     popplerqtcfg['lib_dirs'],
    }
    # This creates the pypopplerqt4config.py module from the pypopplerqt4config.py.in
    # template and the dictionary.
    sipconfig.create_config_module(
        os.path.join(BASE_DIR, "pypopplerqt4config.py"),
        os.path.join(BASE_DIR, "pypopplerqt4config.py.in"),
        content)
def main():
    """Drive configuration: detect SIP/PyQt4/poppler, run sip, then emit
    the Makefiles and the pypopplerqt4config module."""
    sip_config = get_sip_config()
    pyqt_config = get_pyqt4_config()
    options, _args = create_optparser(sip_config).parse_args()
    poppler_config = get_popplerqt_config(options)
    run_sip(pyqt_config)
    generate_makefiles(pyqt_config, poppler_config, options)
    generate_configuration_module(pyqt_config, poppler_config, options)
if __name__ == "__main__":
    main()
| cbeing/remoteSlideShow | third_party/pypoppler-qt4/configure.py | Python | mit | 7,528 |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
## BE PLATFORM NEUTRAL
import nt
import sys
# Windows-style path syntax constants used throughout this module.
colon = ':'
separator = '\\'
def create_new_file(filename):
    """Create (or truncate to empty) the file at *filename*."""
    file(filename, "w").close()
def append_string_to_file(filename, *lines):
    """Append each of *lines* to *filename*, each newline-terminated."""
    f = file(filename, "a")
    try:
        for x in lines:
            # write(), not writelines(): writelines() iterated the string
            # character by character (same output, needless work).
            f.write(x + "\n")
    finally:
        # Close even if a write fails, instead of leaking the handle.
        f.close()
def directory_exists(path):
    """Return True if *path* exists (existence check only; the entry is
    not verified to actually be a directory)."""
    if sys.platform=="win32":
        return nt.access(path, nt.F_OK)
    else:
        try:
            nt.stat(path)
            return True
        except OSError:
            # Narrowed from a bare except: stat signals absence via OSError.
            return False
def file_exists(file):
    """Return True if *file* exists (existence check only; the entry is
    not verified to actually be a regular file)."""
    if sys.platform=="win32":
        return nt.access(file, nt.F_OK)
    else:
        try:
            nt.stat(file)
            return True
        except OSError:
            # Narrowed from a bare except, matching directory_exists.
            return False
def file_exists_in_path(file):
    """Return True if *file* is found in the CWD or any PATH directory."""
    # PATH lookup is case-insensitive on the variable name.
    full_path = [nt.environ[x] for x in nt.environ.keys() if x.lower() == "path"]
    if len(full_path)==0:
        return False
    else:
        full_path = full_path[0]
    # The current directory is searched first, mirroring Windows semantics.
    for path in [nt.getcwd()] + full_path.split(";"):
        path = path.lstrip().rstrip()
        if file_exists(path + "\\" + file) == True:
            return True
    return False
# need consider .. and . later
def fullpath(path):
    """Return an absolute form of *path*.

    A path without a drive colon is treated as relative to the CWD.  On
    non-win32 platforms .NET's GetFullPath is used (presumably running
    under IronPython/Mono -- TODO confirm), otherwise drive-qualified
    paths are returned unchanged.
    """
    if colon not in path:
        return nt.getcwd() + separator + path
    elif sys.platform!="win32":
        from System.IO.Path import GetFullPath
        return GetFullPath(path)
    else:
        return path
def path_combine(*paths):
    """Join path fragments with the module separator, avoiding doubled
    separators when a fragment already ends with one."""
    joined = ''
    for fragment in paths[:-1]:
        if fragment and not fragment.endswith(separator):
            fragment += separator
        joined += fragment
    return joined + paths[-1]
def get_full_dir_name(path):
    """removes ~# from short file names"""
    # On real win32 CPython short names are left as-is; the .NET
    # DirectoryInfo expansion is only available off-win32 (IronPython).
    if sys.platform == "win32": return path
    import System
    return System.IO.DirectoryInfo(path).FullName
def ensure_directory_present(path):
    """Create every missing component of *path*, like os.makedirs."""
    path = fullpath(path)
    p = ''
    # Walk the components left to right, creating each level as needed.
    for x in path.split(separator):
        p += x + separator
        if not directory_exists(p):
            nt.mkdir(p)
def write_to_file(filename, content=''):
    """Write *content* to *filename*, creating parent directories.

    Raises AssertionError on any failure (test-utility contract kept).
    """
    filename = fullpath(filename)
    pos = filename.rfind(separator)
    try:
        ensure_directory_present(filename[:pos])
        f = file(filename, 'w')
        try:
            f.write(content)
        finally:
            # Close the handle even when the write fails; the original
            # leaked it on error.
            f.close()
    except:
        raise AssertionError('unable to write to file')
def delete_files(*files):
    """Best-effort removal of each named file; failures are ignored."""
    for path in files:
        try:
            nt.remove(path)
        except:
            pass
def get_parent_directory(path, levels=1):
    """Return *path* with its last *levels* components removed, or ''
    if the path runs out of separators first."""
    remaining = levels
    while remaining:
        # Ignore a trailing separator when searching for the cut point.
        cut = path[:-1].rfind(separator)
        if cut < 0:
            return ""
        path = path[:cut]
        remaining -= 1
    return path
def samefile(file1, file2):
    """Case-insensitive comparison of the two files' full paths."""
    first = fullpath(file1).lower()
    second = fullpath(file2).lower()
    return first == second
def filecopy(oldpath, newpath):
    """Copy *oldpath* to *newpath* in 16 KiB chunks.

    Raises AssertionError if both names resolve to the same file.
    """
    if samefile(oldpath, newpath):
        raise AssertionError("%s and %s are same" % (oldpath, newpath))
    of, nf = None, None
    try:
        of = file(oldpath, 'rb')
        nf = file(newpath, 'wb')
        while True:
            b = of.read(1024 * 16)
            if not b:
                break
            nf.write(b)
    finally:
        # Close whichever handles were successfully opened.
        if of: of.close()
        if nf: nf.close()
def clean_directory(path):
    """Best-effort removal of every entry directly inside *path*."""
    for entry in nt.listdir(path):
        try:
            nt.unlink(path_combine(path, entry))
        except:
            # Ignore entries that cannot be unlinked (e.g. directories).
            pass
def get_directory_name(file):
    """Return the directory portion of *file*'s full path."""
    full = fullpath(file)
    return full[:full.rfind(separator)]
def find_peverify():
    """Return the full path to peverify.exe by scanning PATH, or None.

    Only meaningful under IronPython (sys.platform == 'cli'); prints a
    warning banner when the tool cannot be found.
    """
    if sys.platform != 'cli': return None
    import System
    for d in System.Environment.GetEnvironmentVariable("PATH").split(';'):
        file = path_combine(d, "peverify.exe")
        if file_exists(file):
            return file
    print("""
#################################################
#     peverify.exe not found. Test will fail.   #
#################################################
""")
    return None
def get_mod_names(filename):
    '''
    Returns a list of all Python modules and subpackages in the same location
    as filename w/o their ".py" extension.
    '''
    directory = filename
    if file_exists(filename):
        directory = get_directory_name(filename)
    else:
        raise Exception("%s does not exist!" % (str(filename)))
    #Only look at files with the .py extension and directories.
    ret_val = []
    for x in nt.listdir(directory):
        if x.lower() == "__init__.py":
            continue
        if x.endswith(".py"):
            # Strip only the trailing extension.  The previous
            # rsplit(".py")[0] split on every ".py" occurrence, so a
            # module named "foo.pyx.py" was wrongly reported as "foo".
            ret_val.append(x[:-3])
        elif "." not in x:
            # Dotless entries are assumed to be package directories.
            ret_val.append(x)
    return ret_val
def delete_all_f(module_name):
    """Unlink every file whose name is held in an '_f_' attribute of the
    given module; individual failures are ignored."""
    module = sys.modules[module_name]
    for attr_name in dir(module):
        if not attr_name.startswith('_f_'):
            continue
        value = getattr(module, attr_name)
        if isinstance(value, str):
            try:
                nt.unlink(value)
            except:
                pass
| tempbottle/ironpython3 | Src/IronPython/Lib/iptest/file_util.py | Python | apache-2.0 | 5,541 |
import os
from catalyst.support import addl_arg_parse
class TargetBase(object):
	"""
	The toplevel class for all targets. This is about as generic as we get.
	"""
	def __init__(self, myspec, addlargs):
		# Validate the extra arguments against the subclass-supplied
		# required_values / valid_values lists before accepting the spec.
		addl_arg_parse(myspec,addlargs,self.required_values,self.valid_values)
		self.settings=myspec
		# Minimal sanitized environment for commands spawned by targets.
		self.env = {
			'PATH': '/bin:/sbin:/usr/bin:/usr/sbin',
			'TERM': os.getenv('TERM', 'dumb'),
		}
| proneetv/catalyst | catalyst/base/targetbase.py | Python | gpl-2.0 | 403 |
import sys
import traceback
from contextlib import closing
from functools import wraps
from StringIO import StringIO
from zipfile import ZipFile, ZIP_DEFLATED
from celery.task import task
from django.conf import settings
from django.core.mail import send_mail, EmailMessage
from django.core.files.storage import default_storage
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from go.vumitools.contact.models import ContactNotFoundError
from go.base.models import UserProfile
from go.base.utils import UnicodeCSVWriter, vumi_api
from go.contacts.parsers import ContactFileParser
from go.contacts.utils import contacts_by_key
def with_user_api(func):
    """Decorator: open a Vumi API client, look up the user API for the
    given account key, and pass it as the first argument to *func*.

    The underlying API client is always closed afterwards.
    """
    @wraps(func)
    def wrapper(account_key, *args, **kw):
        with closing(vumi_api()) as api:
            user_api = api.get_user_api(account_key)
            return func(user_api, account_key, *args, **kw)
    return wrapper
@task(ignore_result=True)
@with_user_api
def delete_group(api, account_key, group_key):
    """Remove every contact from the group, then delete the group itself."""
    # NOTE: There is a small chance that this can break when running in
    # production if the load is high and the queues have backed up.
    # What could happen is that while contacts are being removed from
    # the group, new contacts could have been added before the group
    # has been deleted. If this happens those contacts will have
    # secondary indexes in Riak pointing to a non-existent Group.
    contact_store = api.contact_store
    group = contact_store.get_group(group_key)
    # We do this one at a time because we're already saving them one at a time
    # and the boilerplate for fetching batches without having them all sit in
    # memory is ugly.
    contacts_page = group.backlinks.contact_keys()
    while contacts_page is not None:
        for contact_key in contacts_page:
            contact = contact_store.get_contact_by_key(contact_key)
            contact.groups.remove(group)
            contact.save()
        contacts_page = contacts_page.next_page()
    group.delete()
@task(ignore_result=True)
@with_user_api
def delete_group_contacts(api, account_key, group_key):
    """Delete every contact belonging to the group (the group survives)."""
    contact_store = api.contact_store
    group = contact_store.get_group(group_key)
    contacts_page = contact_store.get_contact_keys_for_group(group)
    # FIXME: We pull all keys into memory to avoid modifying search results if
    #        we're deleting contacts that are part of a smart group.
    contact_keys = []
    while contacts_page is not None:
        contact_keys.extend(contacts_page)
        contacts_page = contacts_page.next_page()
    # We do this one at a time because we're already saving them one at a time
    # and the boilerplate for fetching batches without having them all sit in
    # memory is ugly.
    for contact_key in contact_keys:
        contact_store.get_contact_by_key(contact_key).delete()
def zipped_file(filename, data):
    """Return the bytes of a zip archive containing a single member named
    *filename* whose contents are *data*."""
    buf = StringIO()
    archive = ZipFile(buf, "a", ZIP_DEFLATED)
    archive.writestr(filename, data)
    archive.close()
    return buf.getvalue()
# Canonical column order for contact CSV exports (see contacts_to_csv).
_contact_fields = [
    'key',
    'name',
    'surname',
    'email_address',
    'msisdn',
    'dob',
    'twitter_handle',
    'facebook_id',
    'bbm_pin',
    'gtalk_id',
    'mxit_id',
    'wechat_id',
    'created_at',
]
def contacts_to_csv(contacts, include_extra=True):
    """Render *contacts* as CSV text, sorted by creation time.

    When *include_extra* is true, one column per dynamic `extra` key found
    across the contacts is appended after the standard _contact_fields.
    """
    contacts = sorted(contacts, key=lambda c: c.created_at)
    io = StringIO()
    writer = UnicodeCSVWriter(io)
    # Collect the possible field names for this set of contacts, depending
    # the number of contacts found this could be potentially expensive.
    extra_fields = set()
    if include_extra:
        for contact in contacts:
            extra_fields.update(contact.extra.keys())
    extra_fields = sorted(extra_fields)
    # write the CSV header, prepend extras with `extra-` if it happens to
    # overlap with any of the existing contact's fields.
    writer.writerow(_contact_fields + [
        ('extras-%s' % (f,) if f in _contact_fields else f)
        for f in extra_fields])
    # loop over the contacts and create the row populated with
    # the values of the selected fields.
    for contact in contacts:
        row = [unicode(getattr(contact, field, None) or '')
               for field in _contact_fields]
        if include_extra:
            row.extend([unicode(contact.extra[extra_field] or '')
                        for extra_field in extra_fields])
        writer.writerow(row)
    return io.getvalue()
def get_group_contacts(contact_store, *groups):
    """Return the contact objects for every member of the given groups."""
    # TODO: FIXME: Kill this thing. It keeps all contact objects in memory.
    contact_keys = []
    for group in groups:
        contacts_page = contact_store.get_contact_keys_for_group(group)
        while contacts_page is not None:
            contact_keys.extend(contacts_page)
            contacts_page = contacts_page.next_page()
    return contacts_by_key(contact_store, *contact_keys)
@task(ignore_result=True)
@with_user_api
def export_contacts(api, account_key, contact_keys, include_extra=True):
    """
    Export a list of contacts as a CSV file and email to the account
    holders' email address.
    :param str account_key:
        The account holders account key
    :param str contact_keys:
        The keys of the contacts to export
    :param bool include_extra:
        Whether or not to include the extra data stored in the dynamic field.
    """
    contact_store = api.contact_store
    all_key_count = len(contact_keys)
    message_content_template = 'Please find the CSV data for %s contact(s)'
    # Cap oversized exports; the email body then notes the total key count.
    if all_key_count > settings.CONTACT_EXPORT_TASK_LIMIT:
        contact_keys = contact_keys[:settings.CONTACT_EXPORT_TASK_LIMIT]
        message_content_template = '\n'.join([
            'NOTE: There are too many contacts to export.',
            'Please find the CSV data for %%s (out of %s) contacts.' % (
                all_key_count,)])
    contacts = contacts_by_key(contact_store, *contact_keys)
    data = contacts_to_csv(contacts, include_extra)
    file = zipped_file('contacts-export.csv', data)
    # Get the profile for this user so we can email them when the import
    # has been completed.
    user_profile = UserProfile.objects.get(user_account=account_key)
    email = EmailMessage(
        'Contacts export', message_content_template % len(contacts),
        settings.DEFAULT_FROM_EMAIL, [user_profile.user.email])
    email.attach('contacts-export.zip', file, 'application/zip')
    email.send()
@task(ignore_result=True)
@with_user_api
def export_all_contacts(api, account_key, include_extra=True):
    """
    Export all contacts as a CSV file and email to the account
    holders' email address.
    :param str account_key:
        The account holders account key
    :param bool include_extra:
        Whether or not to include the extra data stored in the dynamic field.
    """
    contact_store = api.contact_store
    contact_keys = contact_store.contacts.all_keys()
    # Delegates (synchronously) to export_contacts, which also applies the
    # CONTACT_EXPORT_TASK_LIMIT cap and sends the email.
    return export_contacts(account_key, contact_keys,
                           include_extra=include_extra)
@task(ignore_result=True)
@with_user_api
def export_group_contacts(api, account_key, group_key, include_extra=True):
    """
    Export a group's contacts as a CSV file and email to the account
    holders' email address.
    :param str account_key:
        The account holders account key
    :param str group_key:
        The group to export contacts for (can be either static or smart groups)
    :param bool include_extra:
        Whether or not to include the extra data stored in the dynamic field.
    """
    contact_store = api.contact_store
    group = contact_store.get_group(group_key)
    contacts = get_group_contacts(contact_store, group)
    data = contacts_to_csv(contacts, include_extra)
    file = zipped_file('contacts-export.csv', data)
    # Get the profile for this user so we can email them when the import
    # has been completed.
    user_profile = UserProfile.objects.get(user_account=account_key)
    email = EmailMessage(
        '%s contacts export' % (group.name,),
        'Please find the CSV data for %s contact(s) from '
        'group "%s" attached.\n\n' % (len(contacts), group.name),
        settings.DEFAULT_FROM_EMAIL, [user_profile.user.email])
    email.attach('contacts-export.zip', file, 'application/zip')
    email.send()
@task(ignore_result=True)
@with_user_api
def export_many_group_contacts(api, account_key, group_keys,
                               include_extra=True):
    """
    Export multiple group contacts as a single CSV file and email to the
    account holders' email address.
    :param str account_key:
        The account holders account key
    :param list group_keys:
        The groups to export contacts for
        (can be either static or smart groups)
    :param bool include_extra:
        Whether or not to include the extra data stored in the dynamic field.
    """
    contact_store = api.contact_store
    groups = [contact_store.get_group(k) for k in group_keys]
    contacts = get_group_contacts(contact_store, *groups)
    data = contacts_to_csv(contacts, include_extra)
    file = zipped_file('contacts-export.csv', data)
    # Get the profile for this user so we can email them when the import
    # has been completed.
    user_profile = UserProfile.objects.get(user_account=account_key)
    email = EmailMessage(
        'Contacts export',
        'Please find the attached CSV data for %s contact(s) from the '
        'following groups:\n%s\n' %
        (len(contacts), '\n'.join(' - %s' % g.name for g in groups)),
        settings.DEFAULT_FROM_EMAIL, [user_profile.user.email])
    email.attach('contacts-export.zip', file, 'application/zip')
    email.send()
@task(ignore_result=True)
@with_user_api
def import_new_contacts_file(api, account_key, group_key, file_name, file_path,
                             fields, has_header):
    """Import contacts from an uploaded file into a group.

    All-or-nothing: on any failure every contact written so far is deleted
    and a failure email is sent.  The uploaded file is always removed from
    storage afterwards.
    """
    contact_store = api.contact_store
    group = contact_store.get_group(group_key)
    # Get the profile for this user so we can email them when the import
    # has been completed.
    user_profile = UserProfile.objects.get(user_account=account_key)
    written_contacts = []
    try:
        extension, parser = ContactFileParser.get_parser(file_name)
        contact_dictionaries = parser.parse_file(file_path, fields, has_header)
        for contact_dictionary in contact_dictionaries:
            # Make sure we set this group they're being uploaded in to
            contact_dictionary['groups'] = [group.key]
            contact = contact_store.new_contact(**contact_dictionary)
            written_contacts.append(contact)
        send_mail(
            'Contact import completed successfully.',
            render_to_string('contacts/import_completed_mail.txt', {
                # len() instead of the old enumerate counter, which
                # under-reported by one and was unbound for an empty file
                # (turning a clean empty import into a NameError and the
                # failure path below).
                'count': len(written_contacts),
                'group': group,
                'user': user_profile.user,
            }), settings.DEFAULT_FROM_EMAIL, [user_profile.user.email],
            fail_silently=False)
    except Exception:
        # Clean up if something went wrong, either everything is written
        # or nothing is written
        for contact in written_contacts:
            contact.delete()
        exc_type, exc_value, exc_traceback = sys.exc_info()
        send_mail(
            'Something went wrong while importing the contacts.',
            render_to_string('contacts/import_failed_mail.txt', {
                'user': user_profile.user,
                'group_key': group_key,
                'account_key': account_key,
                'file_name': file_name,
                'file_path': file_path,
                'fields': fields,
                'has_header': has_header,
                'exception_type': exc_type,
                'exception_value': mark_safe(exc_value),
                'exception_traceback': mark_safe(
                    traceback.format_tb(exc_traceback)),
            }), settings.DEFAULT_FROM_EMAIL, [
                user_profile.user.email,
                'support+contact-import@vumi.org',
            ], fail_silently=False)
    finally:
        default_storage.delete(file_path)
@with_user_api
def import_and_update_contacts(api, account_key, contact_mangler, group_key,
                               file_name, file_path, fields, has_header):
    """Shared driver for update-style imports.

    Each row must carry the existing contact's `key`; *contact_mangler*
    merges the row into the stored contact.  Per-row failures are collected
    and reported in the completion email instead of aborting the import.
    """
    contact_store = api.contact_store
    group = contact_store.get_group(group_key)
    user_profile = UserProfile.objects.get(user_account=account_key)
    extension, parser = ContactFileParser.get_parser(file_name)
    contact_dictionaries = parser.parse_file(file_path, fields, has_header)
    errors = []
    counter = 0
    for idx, contact_dictionary in enumerate(contact_dictionaries):
        try:
            key = contact_dictionary.pop('key')
            contact = contact_store.get_contact_by_key(key)
            contact_dictionary = contact_mangler(contact, contact_dictionary)
            contact_store.update_contact(key, **contact_dictionary)
            counter += 1
        except KeyError, e:
            # Row had no 'key' column; report it by row number.
            errors.append(('row %d' % (idx + 1,), 'No key provided'))
        except ContactNotFoundError, e:
            errors.append((key, str(e)))
        except Exception, e:
            errors.append((key, str(e)))
    email = render_to_string(
        'contacts/import_upload_is_truth_completed_mail.txt', {
            'count': counter,
            'errors': errors,
            'group': group,
            'user': user_profile.user,
        })
    send_mail(
        'Contact import completed.',
        email, settings.DEFAULT_FROM_EMAIL, [user_profile.user.email],
        fail_silently=False)
    default_storage.delete(file_path)
@task(ignore_result=True)
def import_upload_is_truth_contacts_file(account_key, group_key, file_name,
                                         file_path, fields, has_header):
    """Update existing contacts from an upload; on conflicts the uploaded
    values win over the stored ones."""
    def merge_operation(contact, contact_dictionary):
        # NOTE: The order here is important, the new extra is
        #       the truth which we want to maintain
        new_extra = {}
        new_extra.update(dict(contact.extra))
        new_extra.update(contact_dictionary.pop('extra', {}))
        new_subscription = {}
        new_subscription.update(dict(contact.subscription))
        new_subscription.update(contact_dictionary.pop('subscription', {}))
        contact_dictionary['extra'] = new_extra
        contact_dictionary['subscription'] = new_subscription
        contact_dictionary['groups'] = [group_key]
        return contact_dictionary
    return import_and_update_contacts(
        account_key, merge_operation, group_key, file_name, file_path, fields,
        has_header)
@task(ignore_result=True)
def import_existing_is_truth_contacts_file(account_key, group_key, file_name,
                                           file_path, fields, has_header):
    """Update existing contacts from an upload; on conflicts the stored
    values win over the uploaded ones."""
    def merge_operation(contact, contact_dictionary):
        # NOTE: The order here is important, the existing extra is
        #       the truth which we want to maintain
        cloned_contact_dictionary = contact_dictionary.copy()
        cloned_contact_dictionary['groups'] = [group_key]
        new_extra = {}
        new_extra.update(contact_dictionary.pop('extra', {}))
        new_extra.update(dict(contact.extra))
        cloned_contact_dictionary['extra'] = new_extra
        new_subscription = {}
        new_subscription.update(contact_dictionary.pop('subscription', {}))
        new_subscription.update(dict(contact.subscription))
        cloned_contact_dictionary['subscription'] = new_subscription
        for key in contact_dictionary.keys():
            # NOTE: If the contact already has any kind of value that
            #       resolves to `True` then skip it.
            #       The current implementation also means that we'll
            #       replace attributes that are empty strings.
            value = getattr(contact, key, None)
            if value:
                cloned_contact_dictionary.pop(key)
        return cloned_contact_dictionary
    return import_and_update_contacts(
        account_key, merge_operation, group_key, file_name, file_path, fields,
        has_header)
| praekelt/vumi-go | go/contacts/tasks.py | Python | bsd-3-clause | 16,214 |
# -*- encoding: utf-8 -*-
import codecs
import os
import django
# Point Django at the project settings and bootstrap the app registry
# before importing any models below.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RecRecife.settings")
django.setup()
from RecRecife.settings import STATIC_DIR, GOOGLE_API_SECRET_KEY
from recmap.models import Endereco, Horario, Coleta, Setor, ColetaHorario
from django.db.utils import IntegrityError
from datetime import datetime
from googlemaps import googlemaps
# Maps abbreviated weekday tokens from the CSV to their display names
# (Portuguese); both accented and unaccented spellings of 'DIARIA' occur.
dias_dict = {
    'SEG': 'Segunda',
    'TER': u'Terça',
    'QUA': 'Quarta',
    'QUI': 'Quinta',
    'SEX': 'Sexta',
    'SAB': U'Sábado',
    'DIARIA': u'Diária',
    'DIÁRIA': u'Diária',
}
"""
ENDEREÇO(NOME, NOME_MIN, NOME_CSV, BAIRRO, LAT, LONG)
SETOR(ID, FREQUÊNCIA)
COLETA(ID, NOME_ENDERECO, SETOR_ID, NUM_ROTA)
HORARIO(TURNO, INTERVALO)
COLETA_HORARIO(ID_COLETA, INTERVALO)
"""
if __name__ == '__main__':
inicio = datetime.now()
gmaps = googlemaps.Client(key=GOOGLE_API_SECRET_KEY)
with codecs.open(STATIC_DIR + '\\recrecife\\csv\\roteirizacao.csv', 'r', 'utf-8') as f:
f.next()
counter = 0
not_found = 0
for line in f:
line = line.split(';')
try:
horario = Horario.objects.get_or_create(intervalo=line[0], turno=line[3].lower().title())
except IntegrityError:
horario = Horario.objects.get_or_create(intervalo=line[0])
dias = line[5].split(',')
for dia in dias:
setor = Setor.objects.get_or_create(nome_setor=line[1], frequencia=dias_dict[u''.join(dia.split())])
try:
endereco = (Endereco.objects.get(nome_bruto=line[2]), False)
coleta = Coleta.objects.get_or_create(endereco=endereco[0], setor=setor[0], rota=line[4])
coleta_horario = ColetaHorario.objects.get_or_create(horario=horario[0], coleta=coleta[0])
except Endereco.DoesNotExist:
try:
geocode = gmaps.geocode(line[2] + u' RECIFE PERNAMBUCO')
latitude = geocode[0]['geometry']['location']['lat']
longitude = geocode[0]['geometry']['location']['lng']
nome_min = line[2]
nome = line[2]
bairro = u'Não informado'
is_valid_area = True
if not (latitude == -8.0578381 and longitude == -34.8828969):
geoindex = len(geocode[0]['address_components'])
for i in range(0, geoindex):
types = geocode[0]['address_components'][i]['types'][0]
if 'administrative_area_level_2' in types:
if geocode[0]['address_components'][i]['long_name'] != 'Recife':
is_valid_area = False
if 'route' in types or 'bus_station' in types or 'transit_station' in types\
or 'subway_station' in types or 'train_station' in types:
nome_min = geocode[0]['address_components'][i]['short_name']
nome = geocode[0]['address_components'][i]['long_name']
elif geocode[0]['address_components'][i]['types'][0] == 'neighborhood':
bairro = geocode[0]['address_components'][i]['long_name']
if is_valid_area:
try:
endereco = Endereco.objects.get_or_create(nome=nome, latitude=latitude, longitude=longitude,
nome_min=nome_min, bairro=bairro, nome_bruto=line[2])
except IntegrityError:
endereco = (Endereco.objects.get(nome=nome), False)
else:
endereco = Endereco.objects.get_or_create(nome=line[2], latitude=latitude, longitude=longitude,
nome_min=line[2], bairro=u'Não encontrado', nome_bruto=line[2])
is_valid_area = True
coleta = Coleta.objects.get_or_create(endereco=endereco[0], setor=setor[0], rota=line[4])
coleta_horario = ColetaHorario.objects.get_or_create(horario=horario[0], coleta=coleta[0])
except IndexError:
not_found += 1
endereco = (line[2], False)
coleta = (line[2] + u' - ' + unicode(setor[0]), False)
counter += 1
print counter
fim = datetime.now()
print 'Falhas: ' + unicode(not_found)
print fim - inicio | victorfsf/RecRecife | coleta_seletiva.py | Python | gpl-2.0 | 4,704 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
class NonSerializableTraceData(Exception):
  """Raised when raw trace data cannot be serialized to TraceData."""
def _ValidateRawData(raw):
try:
json.dumps(raw)
except TypeError as e:
raise NonSerializableTraceData('TraceData is not serilizable: %s' % e)
except ValueError as e:
raise NonSerializableTraceData('TraceData is not serilizable: %s' % e)
class TraceDataPart(object):
  """TraceData can have a variety of events.

  These are called "parts" and are accessed by the following fixed field
  names.
  """

  def __init__(self, raw_field_name):
    self._raw_field_name = raw_field_name

  @property
  def raw_field_name(self):
    """Key under which this part's events are stored in the raw data."""
    return self._raw_field_name

  def __repr__(self):
    return 'TraceDataPart("{0}")'.format(self._raw_field_name)
# The fixed set of parts a TraceData container may hold.
CHROME_TRACE_PART = TraceDataPart('traceEvents')
INSPECTOR_TRACE_PART = TraceDataPart('inspectorTimelineEvents')
SURFACE_FLINGER_PART = TraceDataPart('surfaceFlinger')
TAB_ID_PART = TraceDataPart('tabIds')
TELEMETRY_PART = TraceDataPart('telemetry')
ALL_TRACE_PARTS = {CHROME_TRACE_PART,
                   INSPECTOR_TRACE_PART,
                   SURFACE_FLINGER_PART,
                   TAB_ID_PART,
                   TELEMETRY_PART}
def _HasEventsFor(part, raw):
  """Return True iff *raw* holds a non-empty value for *part*'s field."""
  assert isinstance(part, TraceDataPart)
  field = part.raw_field_name
  if field not in raw:
    return False
  return len(raw[field]) > 0
class TraceData(object):
  """Validates, parses, and serializes raw data.

  NOTE: raw data must only include primitive objects!
  By design, TraceData must contain only data that is BOTH json-serializable
  to a file, AND restorable once again from that file into TraceData without
  assistance from other classes.

  Raw data can be one of three standard trace_event formats:
  1. Trace container format: a json-parseable dict.
  2. A json-parseable array: assumed to be chrome trace data.
  3. A json-parseable array missing the final ']': assumed to be chrome trace
     data.
  """
  def __init__(self, raw_data=None):
    """Creates TraceData from the given data.

    Raises:
      NonSerializableTraceData: if raw_data cannot be dumped as JSON.
      Exception: if the parsed data is neither a dict nor a list.
    """
    self._raw_data = {}
    self._events_are_safely_mutable = False
    if not raw_data:
      return
    _ValidateRawData(raw_data)

    if isinstance(raw_data, basestring):
      if raw_data.startswith('[') and not raw_data.endswith(']'):
        # Repair a truncated chrome trace: drop a dangling comma and close
        # the JSON array so it parses.
        if raw_data.endswith(','):
          raw_data = raw_data[:-1]
        raw_data += ']'
      json_data = json.loads(raw_data)
      # The parsed data isn't shared with anyone else, so we mark this value
      # as safely mutable.
      self._events_are_safely_mutable = True
    else:
      json_data = raw_data

    if isinstance(json_data, dict):
      self._raw_data = json_data
    elif isinstance(json_data, list):
      # Bug fix: the original assigned {} for an empty list and then fell
      # through, unconditionally overwriting it with a chrome-trace entry.
      # An empty list now simply leaves the container empty.
      if json_data:
        self._raw_data = {CHROME_TRACE_PART.raw_field_name: json_data}
    else:
      raise Exception('Unrecognized data format.')

  def _SetFromBuilder(self, d):
    # The builder hands over sole ownership of d, so it is safely mutable.
    self._raw_data = d
    self._events_are_safely_mutable = True

  @property
  def events_are_safely_mutable(self):
    """Returns true if the events in this value are completely sealed.

    Some importers want to take complex fields out of the TraceData and add
    them to the model, changing them subtly as they do so. If the TraceData
    was constructed with data that is shared with something outside the trace
    data, for instance a test harness, then this mutation is unexpected. But,
    if the values are sealed, then mutating the events is a lot faster.

    We know if events are sealed if the value came from a string, or if the
    value came from a TraceDataBuilder.
    """
    return self._events_are_safely_mutable

  @property
  def active_parts(self):
    """The set of TraceDataParts that have a field in the raw container."""
    return {p for p in ALL_TRACE_PARTS if p.raw_field_name in self._raw_data}

  @property
  def metadata_records(self):
    """Yields {'name', 'value'} dicts for raw fields that are not parts."""
    part_field_names = {p.raw_field_name for p in ALL_TRACE_PARTS}
    for k, v in self._raw_data.iteritems():
      if k in part_field_names:
        continue
      yield {
        'name': k,
        'value': v
      }

  def HasEventsFor(self, part):
    """Returns True if the given part is present and non-empty."""
    return _HasEventsFor(part, self._raw_data)

  def GetEventsFor(self, part):
    """Returns the event list for part, or [] if there are none."""
    if not self.HasEventsFor(part):
      return []
    assert isinstance(part, TraceDataPart)
    return self._raw_data[part.raw_field_name]

  def Serialize(self, f, gzip_result=False):
    """Serializes the trace result to a file-like object.

    Always writes in the trace container format.
    """
    assert not gzip_result, 'Not implemented'
    json.dump(self._raw_data, f)
class TraceDataBuilder(object):
  """TraceDataBuilder helps build up a trace from multiple trace agents.

  TraceData is supposed to be immutable, but it is useful during recording to
  have a mutable version. That is TraceDataBuilder.
  """
  def __init__(self):
    self._raw_data = {}

  def AsData(self):
    """Freezes the accumulated events into an immutable TraceData.

    May only be called once; the builder is unusable afterwards.
    """
    # Idiom fix: compare to None with `is`, not `==`.
    if self._raw_data is None:
      raise Exception('Can only AsData once')
    data = TraceData()
    data._SetFromBuilder(self._raw_data)
    self._raw_data = None
    return data

  def AddEventsTo(self, part, events):
    """Note: this won't work when called from multiple browsers.

    Each browser's trace_event_impl zeros its timestamps when it writes them
    out and doesn't write a timebase that can be used to re-sync them.
    """
    assert isinstance(part, TraceDataPart)
    assert isinstance(events, list)
    if self._raw_data is None:
      raise Exception('Already called AsData() on this builder.')
    self._raw_data.setdefault(part.raw_field_name, []).extend(events)

  def HasEventsFor(self, part):
    """Returns True if any events were added for the given part."""
    return _HasEventsFor(part, self._raw_data)
| SummerLW/Perf-Insight-Report | telemetry/telemetry/timeline/trace_data.py | Python | bsd-3-clause | 5,820 |
# -*- coding: utf-8 -*-
from django.db import models
from django.template.defaultfilters import slugify
from smart_selects.db_fields import ChainedForeignKey
from lugar.models import *
# Create your models here.
class Status_Legal(models.Model):
    # Lookup table: legal status of an organization.
    nombre = models.CharField(max_length=200)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name = 'Status Legal'
        verbose_name_plural = 'Status Legal'
class Sector(models.Model):
    # Lookup table: economic/social sector an organization belongs to.
    nombre = models.CharField(max_length=200)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name = 'Sector'
        verbose_name_plural = 'Sectores'
class Organizacion(models.Model):
    # Surveyed organization with contact data and classification.
    nombre = models.CharField(max_length=200)
    status_legal = models.ForeignKey(Status_Legal)
    anno_fundacion = models.DateField(verbose_name='Año de fundación')
    dueno = models.CharField(verbose_name='Dueño, Presidente, Director',max_length=200)
    numero_activistas = models.IntegerField(verbose_name='Numero de activistas o miembros')
    direccion = models.CharField(max_length=200)
    departamento = models.ForeignKey(Departamento)
    # Chained select: municipio choices are filtered by the chosen
    # departamento (django-smart-selects).
    municipio = ChainedForeignKey(
        Municipio,
        chained_field="departamento",
        chained_model_field="departamento",
        show_all=False, auto_choose=True)
    # NOTE(review): IntegerField for phone/fax drops leading zeros and
    # cannot hold formatting -- a CharField is the usual choice; confirm
    # before changing (requires a migration).
    telefono = models.IntegerField()
    fax = models.IntegerField()
    email = models.EmailField()
    sector = models.ForeignKey(Sector)
    # URL slug derived from `nombre`; set automatically in save().
    slug = models.SlugField(editable=False)
    def __unicode__(self):
        return self.nombre
    def save(self, *args, **kwargs):
        # Only generate the slug on first save (no primary key yet) so that
        # existing URLs stay stable if the name is later edited.
        if not self.id:
            self.slug = slugify(self.nombre)
        super(Organizacion, self).save(*args, **kwargs)
    class Meta:
        verbose_name = 'Organización'
        verbose_name_plural = 'Organizaciones'
class Estado(models.Model):
    # Lookup table: state/condition label.
    estado = models.CharField(max_length=50)
    def __unicode__(self):
        return self.estado
class Ubicacion(models.Model):
    # Lookup table: location label.
    ubicacion = models.CharField(max_length=50)
    def __unicode__(self):
        return self.ubicacion
    class Meta:
        verbose_name = 'Ubicación'
        verbose_name_plural = 'Ubicaciones'
class Socio(models.Model):
    # Lookup table: partner name.
    socio = models.CharField(max_length=100)
    def __unicode__(self):
        return self.socio
class Tema(models.Model):
    # Lookup table: topic/theme.
    tema = models.CharField(max_length=200)
    def __unicode__(self):
        return self.tema
class Grupo(models.Model):
    # Lookup table: group name.
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
class Grupo_Beneficiario(models.Model):
    # Lookup table: beneficiary group.
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name = 'Grupo Beneficiario'
        verbose_name_plural = 'Grupos Beneficiarios'
class Papel(models.Model):
    # Lookup table: role played by an actor.
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name = 'Papel'
        verbose_name_plural = 'Papeles'
class Categoria(models.Model):
    # Lookup table: generic category.
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
class Categoria_Innovacion(models.Model):
    # Lookup table: innovation category.
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name = 'Categoria de Innovación'
        verbose_name_plural = 'Categorias de Innovación'
class Categoria_Conocimiento(models.Model):
    # Lookup table: knowledge category.
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name = 'Categoria de Conocimiento'
        verbose_name_plural = 'Categorias de Conocimiento'
class Categoria_Fuente(models.Model):
    # Lookup table: source category.
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name = 'Categoria de Fuente'
        verbose_name_plural = 'Categorias de Fuente'
class Seleccion_7a(models.Model):
    # Lookup table: answer options for survey question 7a.
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name = 'Selección pregunta 7a'
        verbose_name_plural = 'Selecciones pregunta 7a'
class Seleccion_7b(models.Model):
    # Lookup table: answer options for survey question 7b.
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name = 'Selección pregunta 7b'
        verbose_name_plural = 'Selecciones pregunta 7b'
class Tipo_Estudio(models.Model):
    # Lookup table: type of study.
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name = 'Tipo de estudio'
        verbose_name_plural = 'Tipos de estudios'
class Tema_Relacion(models.Model):
    # Lookup table: type of relationship between actors.
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
    class Meta:
        verbose_name = 'Tipo de Relación'
        verbose_name_plural = 'Tipos de Relación'
| ErickMurillo/ciat_analisis_org | configuracion/models.py | Python | mit | 4,532 |
import numpy
from matplotlib import cm
def scale(array, min=None, max=None, gamma=1, output_max=255):
    """Return an array with values in the range [0, output_max].

    If 'min' and/or 'max' are given, those input values map to 0 and
    'output_max' respectively; otherwise the input's own extrema are used.
    An optional gamma transform is applied before the final scaling.
    (Parameter names shadow builtins but are kept for API compatibility.)
    """
    lo = array.min() if min is None else min
    hi = array.max() if max is None else max
    if lo >= hi:
        # Degenerate range: everything maps to zero.
        return numpy.zeros_like(array)
    # Underflow can occur for tiny normalized values raised to a gamma;
    # it is harmless here, so silence it for the duration.
    with numpy.errstate(under='ignore'):
        normed = ((numpy.clip(array.astype(float), lo, hi) - lo) / (hi - lo)) ** gamma
    return normed * output_max
def write_scaled(array, filename, min, max, gamma=1):
    """Scale `array` to uint8 [0, 255] (see `scale`) and write it to `filename`.

    Imported lazily so the rest of the module works without the optional
    `freeimage` package.
    """
    import freeimage
    freeimage.write(scale(array, min, max, gamma).astype(numpy.uint8), filename)
def color_tint(array, target_color):
    """Weight an (R, G, B) color by per-pixel intensities.

    Output intensity ranges from (0,0,0) up to `target_color`, scaled by the
    array values; output shape is array.shape + (len(target_color),).
    The input array MUST be scaled [0, 1] with 'scale()' or similar.
    """
    intensities = numpy.asarray(array)
    # Append a trailing axis so the color tuple broadcasts per pixel.
    return intensities[..., numpy.newaxis] * target_color
def color_map(array, spectrum_max=0.925, uint8=True, cmap='plasma'):
    """Color-map the input array on a pleasing black-body-ish black-blue-red-orange-yellow
    spectrum, using matplotlib's excellent and perceptually linear "plasma" or "inferno" colormap.

    Parameters:
        array: MUST be scaled [0, 1] with 'scale()' or similar.
        spectrum_max: controls the point along the spectrum (0 to 1)
            at which the colormap ends. A value of 1 is often too intensely
            yellow for good visualization.
        uint8: if True, return uint RGB tuples in range [0, 255], otherwise
            floats in [0, 1]
        cmap: matplotlib color map to use. Should be 'plasma' or 'inferno'...

    Output: array of shape array.shape + (3,), where color values are RGB tuples

    NOTE(review): `spectrum_max` and `uint8` are currently ignored by the
    body -- the full colormap range is always used and the result is always
    uint8 (`bytes=numpy.uint8` is merely truthy). Confirm the intended
    behavior before relying on either parameter.
    """
    # array scaled 0 to 1
    array = numpy.asarray(array, dtype=float)
    assert array.min() >= 0 and array.max() <= 1
    rgb = cm.get_cmap(cmap)(array, bytes=numpy.uint8)[...,:3]
    return rgb
def luminance(color_array):
    """Return luminance of an RGB (or RGBA) array (shape (x, y, 3) or (x, y, 4)).

    Uses the CIE 1931 linear-luminance weights:
    https://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
    Any alpha channel is ignored.
    """
    channels = color_array.transpose((2, 0, 1))
    red, green, blue = channels[0], channels[1], channels[2]
    return 0.2126 * red + 0.7152 * green + 0.0722 * blue
def screen(a, b, max_possible=255):
    """Blend two arrays together using the 'screen' mode.

    Good for combining color-tinted fluorescence images with brightfield.
    `max_possible` is the brightest representable value (1 for [0, 1]
    images, 255 for [0, 255] images).
    """
    inverted_a = max_possible - a.astype(float)
    inverted_b = max_possible - b.astype(float)
    # Screen = invert, multiply, invert again.
    return max_possible - inverted_a * inverted_b / max_possible
def multi_screen(arrays, max_possible=255):
    """Screen-blend a list of arrays together (see 'screen()').

    Blending is applied left-to-right; screen mode is commutative, so the
    order does not affect the result.
    """
    result = arrays[0]
    for layer in arrays[1:]:
        result = screen(layer, result, max_possible)
    return result
def alpha_blend(top, bottom, alpha):
    """Blend `top` onto `bottom` using the provided alpha value(s).

    Parameters:
        top, bottom: images of identical shape, (x, y) or (x, y, c).
        alpha: blending weight in [0, 1]; scalar or an (x, y) mask.
    """
    alpha = numpy.asarray(alpha)
    assert top.shape == bottom.shape
    if top.ndim == 3 and alpha.ndim == 2:
        # Broadcast a 2d mask across the color channels of an RGB image.
        alpha = alpha[..., numpy.newaxis]
    blended = top * alpha + bottom * (1 - alpha)
    return blended.astype(bottom.dtype)
def composite(bf, fl_images, fl_colors, bf_color=(255,255,255)):
    """Composite one or more fluorescence images on top of a brightfield image.

    Parameters:
        bf: brightfield image. MUST be scaled in the range [0, 1].
        fl_images: list of fluorescence images. MUST be scaled in the range [0, 1].
        fl_colors: list of RGB tuples for the color-tint of each fluorescence image.
        bf_color: RGB tuple for the color-tint of the brightfield image. (White is usual.)

    Output: RGB image.
    """
    # Tint each layer, then screen-blend so bright regions combine without
    # clipping.
    bf = color_tint(bf, bf_color).astype(numpy.uint8)
    fl_images = [color_tint(fl, cl).astype(numpy.uint8) for fl, cl in zip(fl_images, fl_colors)]
    return multi_screen([bf] + fl_images)
def interpolate_color(array, zero_color, one_color):
    """Make an image with colors linearly interpolated between two RGB tuples.

    Input array MUST be in the range [0, 1]: 0 maps to `zero_color`, 1 maps
    to `one_color`. Output shape is array.shape + (3,).

    Bug fix: the original summed the two tints (array*zero + array*one),
    so `zero_color` was never honored at 0 -- this is now a true lerp,
    matching the documented contract. (Callers passing the common
    zero_color=(0,0,0) are unaffected.)
    """
    arr = numpy.asarray(array, dtype=float)
    zero_color = numpy.asarray(zero_color, dtype=float)
    one_color = numpy.asarray(one_color, dtype=float)
    weights = arr[..., numpy.newaxis]
    return (1 - weights) * zero_color + weights * one_color
def neg_pos_color_tint(array, zero_color=(0,0,0), neg_color=(255,0,76), pos_color=(0,50,255)):
    """Tint a signed array with two different sets of colors, one for negative
    numbers to zero, and one for zero to positive numbers.

    Parameters:
        array: MUST be scaled in the range [-1, 1]
        zero_color: RGB tuple of the color at the zero value
        pos_color: RGB tuple of the color that 1 in the input array should map to
        neg_color: RGB tuple of the color that -1 in the input array should map to

    Output: uint8 array of shape array.shape + (3,).
    """
    array = numpy.asarray(array)
    negative_mask = array < 0
    negative = array[negative_mask]
    positive = array[~negative_mask]
    neg_colors = interpolate_color(-negative, zero_color, neg_color)
    pos_colors = interpolate_color(positive, zero_color, pos_color)
    # Bug fix: the output needs a trailing RGB axis. The original allocated
    # shape array.shape, which made the masked assignments of (k, 3) color
    # rows below raise a ValueError.
    output = numpy.empty(array.shape + (3,), dtype=numpy.uint8)
    output[negative_mask] = neg_colors
    output[~negative_mask] = pos_colors
    return output
def wavelength_to_rgb(l):
    """Given a wavelength in nanometers, return an RGB tuple using
    the so-called "Bruton's Algorithm".
    http://www.physics.sfasu.edu/astro/color/spectra.html

    Note: wavelength parameter must be in the range [350, 780]
    """
    assert 350 <= l <= 780
    # Piecewise-linear hue ramp across the visible bands.
    if l < 440:
        r, g, b = (440 - l) / (440 - 350), 0, 1
    elif l < 490:
        r, g, b = 0, (l - 440) / (490 - 440), 1
    elif l < 510:
        r, g, b = 0, 1, (510 - l) / (510 - 490)
    elif l < 580:
        r, g, b = (l - 510) / (580 - 510), 1, 0
    elif l < 645:
        r, g, b = 1, (645 - l) / (645 - 580), 0
    else:
        r, g, b = 1, 0, 0
    # Fade intensity toward the near-UV and near-IR ends of the spectrum.
    if l > 700:
        attenuation = 0.3 + 0.7 * (780 - l) / (780 - 700)
    elif l < 420:
        attenuation = 0.3 + 0.7 * (l - 350) / (420 - 350)
    else:
        attenuation = 1
    return (255 * attenuation * numpy.array([r, g, b])).astype(numpy.uint8)
| zpincus/zplib | zplib/image/colorize.py | Python | mit | 6,954 |
"""Leetcode 716. Max Stack (Premium)
Easy
URL: https://leetcode.com/problems/max-stack
Design a max stack that supports push, pop, top, peekMax and popMax.
1. push(x) -- Push element x onto stack.
2. pop() -- Remove the element on top of the stack and return it.
3. top() -- Get the element on the top.
4. peekMax() -- Retrieve the maximum element in the stack.
5. popMax() -- Retrieve the maximum element in the stack, and remove it.
If you find more than one maximum elements, only remove the top-most one.
Example 1:
MaxStack stack = new MaxStack();
stack.push(5);
stack.push(1);
stack.push(5);
stack.top(); -> 5
stack.popMax(); -> 5
stack.top(); -> 1
stack.peekMax(); -> 5
stack.pop(); -> 1
stack.top(); -> 5
Note:
- -1e7 <= x <= 1e7
- Number of operations won't exceed 10000.
- The last four operations won't be called when stack is empty.
"""
class MaxStack(object):
    """Stack supporting push/pop/top plus peekMax/popMax.

    Each element is stored as a (value, running_max) pair so the current
    maximum is always available at the top in O(1).
    """

    def __init__(self):
        """Initialize an empty stack of (value, running_max) pairs."""
        self._stack = []

    def push(self, x):
        """Push element x onto stack.

        :type x: int
        :rtype: None

        Time complexity: O(1). Space complexity: O(n).
        """
        if self._stack:
            maximum = max(self._stack[-1][1], x)
        else:
            maximum = x
        self._stack.append((x, maximum))

    def pop(self):
        """Remove and return the top element, or None if empty.

        :rtype: int (fixed: the original docstring claimed None)

        Time complexity: O(1). Space complexity: O(n).
        """
        if not self._stack:
            return None
        return self._stack.pop()[0]

    def top(self):
        """Return the top element without removing it, or None if empty.

        :rtype: int

        Time complexity: O(1). Space complexity: O(n).
        """
        if not self._stack:
            return None
        return self._stack[-1][0]

    def peekMax(self):
        """Return the maximum element, or None if empty.

        :rtype: int

        Time complexity: O(1). Space complexity: O(n).
        """
        if not self._stack:
            return None
        return self._stack[-1][1]

    def popMax(self):
        """Remove and return the top-most maximum element, or None if empty.

        :rtype: int

        Time complexity: O(n). Space complexity: O(n).
        """
        if not self._stack:
            return None
        # Pop until the top-most occurrence of the max, drop it, then push
        # the popped elements back (re-push rebuilds the running maxima).
        maximum = self._stack[-1][1]
        buffer_stack = []
        while self._stack[-1][0] != maximum:
            buffer_stack.append(self._stack.pop()[0])
        self._stack.pop()
        while buffer_stack:
            self.push(buffer_stack.pop())
        return maximum
def main():
    """Demo driver mirroring the problem statement's example."""
    max_stack = MaxStack()
    max_stack.push(5)
    max_stack.push(1)
    max_stack.push(5)
    # Compat fix: parenthesized print works on both Python 2 and 3;
    # the original bare `print x` statements are Python-2-only.
    # Output: 5
    print(max_stack.top())
    # Output: 5
    print(max_stack.popMax())
    # Output: 1
    print(max_stack.top())
    # Output: 5
    print(max_stack.peekMax())
    # Output: 1
    print(max_stack.pop())
    # Output: 5
    print(max_stack.top())


if __name__ == '__main__':
    main()
| bowen0701/algorithms_data_structures | lc0716_max_stack.py | Python | bsd-2-clause | 2,906 |
import contextlib
import logging
import zeit.cms.checkout.interfaces
log = logging.getLogger(__name__)
def with_checked_out(content, function, events=True):
    """Call a function with a checked out version of content.

    Function makes sure content is checked back in after the function ran.

    :param content: CMS content object to check out.
    :param function: callable receiving the checked-out working copy. Its
        truthy return value signals "changes were made"; a falsy return
        raises NotChanged, which causes the working copy to be discarded
        instead of checked in (handled inside ``checked_out``).
    :param events: forwarded to the checkout machinery; if False, no
        checkout/checkin events are sent.
    """
    with checked_out(content, events) as checked_out_obj:
        # checked_out yields None when the checkout was refused; in that
        # case we silently do nothing.
        if checked_out_obj is not None:
            changed = function(checked_out_obj)
            if not changed:
                raise zeit.cms.checkout.interfaces.NotChanged()
@contextlib.contextmanager
def checked_out(content, events=True, semantic_change=None,
                ignore_conflicts=False, temporary=True):
    """Context manager yielding a checked-out working copy of ``content``.

    Yields None (after logging a warning) if the checkout is refused.
    On normal exit the working copy is checked in; if the managed block
    raises NotChanged, the working copy is discarded instead.
    """
    __traceback_info__ = (content.uniqueId,)
    manager = zeit.cms.checkout.interfaces.ICheckoutManager(content)
    try:
        checked_out = manager.checkout(temporary=temporary, event=events)
    except zeit.cms.checkout.interfaces.CheckinCheckoutError:
        # Checkout can legitimately fail (e.g. already locked); callers get
        # None and are expected to cope.
        log.warning("Could not checkout %s." %
                    content.uniqueId, exc_info=True)
        yield None
    else:
        try:
            yield checked_out
        except zeit.cms.checkout.interfaces.NotChanged:
            # Caller signalled "nothing changed": delete the working copy
            # from its workingcopy container instead of checking it in.
            del checked_out.__parent__[checked_out.__name__]
        else:
            manager = zeit.cms.checkout.interfaces.ICheckinManager(checked_out)
            manager.checkin(event=events, semantic_change=semantic_change,
                            ignore_conflicts=ignore_conflicts)
| ZeitOnline/zeit.cms | src/zeit/cms/checkout/helper.py | Python | bsd-3-clause | 1,472 |
#!/usr/bin/python
# -*- coding: utf8 -*-
from decimal import Decimal
# AFIP operation-type codes for WSLPG (grain settlement) requests.
TIPOS_OP = {1: 'Compraventa de granos', 2: u'Consignación de granos'}
GRANOS = {
1: 'LINO', 2: 'GIRASOL', 3: 'MANI EN CAJA',
4: 'GIRASOL DESCASCARADO', 5: 'MANI PARA INDUSTRIA DE SELECCION',
6: 'MANI PARA INDUSTRIA ACEITERA', 7: 'MANI TIPO CONFITERIA',
8: 'COLZA', 9: 'COLZA 00 CANOLA', 10: 'TRIGO FORRAJERO',
11: 'CEBADA FORRAJERA', 12: 'CEBADA APTA PARA MALTERIA',
14: 'TRIGO CANDEAL', 15: 'TRIGO PAN',
16: 'AVENA', 17: 'CEBADA CERVECERA', 18: 'CENTENO',
19: 'MAIZ', 20: 'MIJO', 21: 'ARROZ CASCARA',
22: 'SORGO GRANIFERO', 23: 'SOJA', 25: 'TRIGO PLATA',
26: 'MAIZ FLYNT O PLATA', 27: 'MAIZ PISINGALLO',
28: 'TRITICALE', 30: 'ALPISTE', 31: 'ALGODON', 32: 'CARTAMO',
33: 'POROTO BLANCO NATURAL OVAL Y ALUBIA',
34: 'POROTO DISTINTO DEL BLANCO OVAL Y ALUBIA',
35: 'ARROZ', 46: 'LENTEJA', 47: 'ARVEJA',
48: 'POROTO BLANCO SELECCIONADO OVAL Y ALUBIA',
49: 'OTRAS LEGUMBRES', 50: 'OTROS GRANOS', 59: 'GARBANZO', }
PUERTOS = {1: "SAN LORENZO/SAN MARTIN", 2: "ROSARIO",
3: "BAHIA BLANCA", 4: "NECOCHEA", 5: "RAMALLO", 6: "LIMA",
7: "DIAMANTE", 8: "BUENOS AIRES", 9: "SAN PEDRO",
10: "SAN NICOLAS", 11: "TERMINAL DEL GUAZU", 12: "ZARATE",
13: "VILLA CONSTITUCION"}
PROVINCIAS = {1: 'BUENOS AIRES', 0: 'CAPITAL FEDERAL',
2: 'CATAMARCA', 16: 'CHACO', 17: 'CHUBUT',
4: 'CORRIENTES', 3: u'CÓRDOBA', 5: 'ENTRE RIOS',
18: 'FORMOSA', 6: 'JUJUY', 21: 'LA PAMPA',
8: 'LA RIOJA', 7: 'MENDOZA', 19: 'MISIONES',
20: u'NEUQUÉN', 22: 'RIO NEGRO', 9: 'SALTA',
10: 'SAN JUAN', 11: 'SAN LUIS', 23: 'SANTA CRUZ',
12: 'SANTA FE', 13: 'SANTIAGO DEL ESTERO',
24: 'TIERRA DEL FUEGO', 14: u'TUCUMÁN'}
TIPO_CERT_DEP = {1: "F1116/RT", 5: "F1116/A", 332: u"Cert.Elec."}
CAMPANIAS = {1213: "2012/2013", 1112: "2011/2012", 1011: "2010/2011",
910: "2009/2010", 809: "2008/2009", 708: "2007/2008",
607: "2006/2007", 506: "2005/2006", 405: "2004/2005",
304: "2003/2004", 1314: "2013/2014", 1415: "2014/2015"}
ACTIVIDADES = {41: "FRACCIONADOR DE GRANOS", 29: "ACOPIADOR - CONSIGNATARIO",
33: "CANJEADOR DE BIENES Y/O SERVICIOS POR GRANO",
40: "EXPORTADOR", 31: u"ACOPIADOR DE MANÍ",
30: "ACOPIADOR DE LEGUMBRES",
35: "COMPRADOR DE GRANO PARA CONSUMO PROPIO",
44: "INDUSTRIAL ACEITERO", 47: "INDUSTRIAL BIOCOMBUSTIBLE",
46: "INDUSTRIAL BALANCEADOR", 48: "INDUSTRIAL CERVECERO",
49: "INDUSTRIAL DESTILERIA",
51: "INDUSTRIAL MOLINO DE HARINA DE TRIGO",
50: "INDUSTRIAL MOLINERO", 45: "INDUSTRIAL ARROCERO",
59: "USUARIO DE MOLIENDA DE TRIGO(incluye MAQUILA)",
57: "USUARIO DE INDUSTRIA (Otros granos MENOS trigo)",
52: "INDUSTRIAL SELECCIONADOR", 34: "COMPLEJO INDUSTRIAL",
28: "ACONDICIONADOR", 36: "CORREDOR",
55: "MERCADO DE FUTUROS Y OPCIONES O MERCADO A TERMINO",
39: "EXPLOTADOR DE DEPOSITO Y/O ELEVADOR DE GRANOS",
37: "DESMOTADOR DE ALGODON",
}
# Grados
GRADOS_REF = {u'G3': u'Grado 3', u'G2': u'Grado 2', u'G1': u'Grado 1'}
# Delivered-grade factors per grain type (keyed by grain code, then by
# grade code). All grains except wheat ("trigo pan", code 15) share the
# same factor table, so it is defined once and copied per grain; each entry
# gets its own dict copy to preserve the original per-grain independence.
_GRADO_STD = {u'F1': Decimal('0'), u'F2': Decimal('0'), u'F3': Decimal('0'),
              u'G3': Decimal('0.985'), u'G2': Decimal('1.00'),
              u'G1': Decimal('1.01'), u'FG': Decimal('0')}
_GRANOS_STD = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 17, 18, 19,
               20, 21, 22, 23, 25, 26, 27, 28, 30, 31, 32, 33, 34, 35,
               46, 47, 48, 49, 50, 59)
GRADO_ENT_VALOR = {cod: dict(_GRADO_STD) for cod in _GRANOS_STD}
# Wheat (15, "trigo pan") uses its own scale: G3=0.99 and G1=1.015.
GRADO_ENT_VALOR[15] = {u'F1': Decimal('0'), u'F2': Decimal('0'),
                       u'F3': Decimal('0'), u'G3': Decimal('0.99'),
                       u'G2': Decimal('1.00'), u'G1': Decimal('1.015'),
                       u'FG': Decimal('0')}
# Diccionario de localidades por provincia
# (wslpg.py lo reemplaza con un shelve si es posible)
LOCALIDADES = {}
| guduchango/pyafipws | wslpg_datos.py | Python | gpl-3.0 | 10,039 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class NbadraftNetPipeline(object):
    """Scrapy item pipeline stub: passes every item through unchanged."""

    def process_item(self, item, spider):
        # No transformation yet; returning the item keeps it flowing to any
        # later pipelines.
        return item
| markalansmith/draftmim | spider/nbadraft_net/nbadraft_net/pipelines.py | Python | apache-2.0 | 291 |
import json
from django.contrib.auth.models import Group
from rest_framework.test import APIClient
from rest_framework import status
from rest_framework.test import APITestCase
from hs_core.hydroshare import users
class TestUserInfo(APITestCase):
    """REST API test: /hsapi/userInfo/ returns the authenticated user's data."""
    def setUp(self):
        # Show full diffs on assertion failures.
        self.maxDiff = None
        self.client = APIClient()
        # Ensure the group exists -- presumably required by
        # users.create_account; confirm against its implementation.
        self.group, _ = Group.objects.get_or_create(name='Resource Author')
        # create a user
        self.email = 'test_user@email.com'
        self.username = 'testuser'
        self.first_name = 'some_first_name'
        self.last_name = 'some_last_name'
        self.user = users.create_account(
            self.email,
            username=self.username,
            first_name=self.first_name,
            last_name=self.last_name,
            superuser=False)
        # Bypass login: authenticate all client requests as this user.
        self.client.force_authenticate(user=self.user)
    def test_user_info(self):
        # The endpoint should echo back the account fields created in setUp.
        response = self.client.get('/hsapi/userInfo/', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        content = json.loads(response.content)
        self.assertEqual(content['username'], self.username)
        self.assertEqual(content['email'], self.email)
        self.assertEqual(content['first_name'], self.first_name)
        self.assertEqual(content['last_name'], self.last_name)
| RENCI/xDCIShare | hs_core/tests/api/rest/test_user_info.py | Python | bsd-3-clause | 1,326 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from fontTools.misc.transform import Transform
from robofab.objects.objectsRF import RPoint
from fontbuild.Build import FontProject
from fontbuild.italics import condenseGlyph
from fontbuild.italics import transformFLGlyphMembers
from fontbuild.mix import Master
from fontbuild.mix import Mix
# The root of the Roboto tree
BASEDIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir))
# Masters
rg = Master("%s/src/v2/Roboto_Regular.ufo" % BASEDIR,
anchorPath="%s/res/anchors_regular.json" % BASEDIR)
bd = Master("%s/src/v2/Roboto_Bold.ufo" % BASEDIR,
anchorPath="%s/res/anchors_bold.json" % BASEDIR)
th = Master("%s/src/v2/Roboto_Thin.ufo" % BASEDIR,
anchorPath="%s/res/anchors_thin.json" % BASEDIR)
# build condensed masters
# Glyphs that should be condensed slightly less than the default factor.
lessCondensed = (
    "plusminus bracketleft bracketright dieresis macron "
    "percent multiply degree at i j "
    "zero one two three four five six seven eight nine "
    "braceright braceleft").split()
# Glyphs that must not be condensed at all (marks, punctuation, symbols).
uncondensed = (
    "tonos breve acute grave quotesingle quotedbl asterisk "
    "period currency registered copyright bullet ring degree "
    "dieresis comma bar brokenbar dotaccent dotbelow "
    "colon semicolon uniFFFC uniFFFD uni0488 uni0489 ringbelow "
    "estimated").split()
# Wide glyphs that can tolerate extra condensing.
moreCondensed = "z Z M W A V".split()
def condenseFont(font, scale=.8, stemWidth=185):
  """Return a horizontally condensed copy of `font`, scaled by `scale`.

  Glyphs listed in `uncondensed` are skipped entirely; `stemWidth` is
  accepted but not used in this function.
  """
  f = font.copy()
  xscale = scale
  # NOTE(review): CAPS and LC appear to be leftovers from an earlier
  # per-group pass (see the commented-out loop below); both are unused.
  CAPS = ("A B C.cn D.cn E F G.cn H I J K L M N O.cn P Q.cn R S T U.cn V W X "
          "Y Z one two three four five six seven eight nine zero").split()
  LC = ("a.cn b.cn c.cn d.cn e.cn f g.cn h i j k l m n o.cn p.cn q.cn r s t "
        "u v w x y z").split()
  # for g in [f[name] for name in LC]:
  for g in f:
    if len(g) > 0:
      # print g.name
      if g.name in lessCondensed:
        scale = xscale * 1.1
      if g.name in uncondensed:
        continue
      if g.name in moreCondensed:
        scale = xscale * .90
      # NOTE(review): the adjusted `scale` above is never read -- the
      # transform below always uses `xscale`, so lessCondensed /
      # moreCondensed currently have no effect. Confirm intent before
      # changing: a fix would alter the generated fonts.
      # g2 = condenseGlyph(g, xscale)
      # g.clear()
      # g2.drawPoints(g.getPointPen())
      m = Transform(xscale, 0, 0, 1, 20, 0)
      g.transform(m)
      transformFLGlyphMembers(g, m, transformAnchors=False)
      if g.width != 0:
        g.width += 40
  return f
proj = FontProject(rg.font, BASEDIR, "res/roboto.cfg", th.ffont)
#proj.incrementBuildNumber()

# FAMILYNAME = "Roboto 2 DRAFT"
# FAMILYNAME = "Roboto2"
FAMILYNAME = "Roboto"

proj.buildOTF = True
#proj.autohintOTF = True
proj.buildTTF = True

# Upright weights.  Mix() interpolates between two masters: a plain float
# interpolates both axes equally, an RPoint interpolates x and y separately.
# The name string is "family dir/style name/style map name/short name".
# NOTE(review): the Medium styles reuse the "Lt" short name — confirm intentional.
proj.generateFont(th.font, "%s/Thin/Regular/Th"%FAMILYNAME)
proj.generateFont(Mix([th, rg], 0.45), "%s/Light/Regular/Lt"%FAMILYNAME)
proj.generateFont(Mix([th, rg], RPoint(0.90, 0.92)),
                  "%s/Regular/Regular/Rg"%FAMILYNAME)
proj.generateFont(Mix([rg, bd], 0.35), "%s/Medium/Regular/Lt"%FAMILYNAME)
proj.generateFont(Mix([rg, bd], RPoint(0.73, 0.73)),
                  "%s/Bold/Bold/Rg"%FAMILYNAME)
proj.generateFont(Mix([rg, bd], RPoint(1.125, 1.0)),
                  "%s/Black/Bold/Bk"%FAMILYNAME)

# Italic variants; stemWidth drives the italicization correction.
proj.generateFont(th.font, "%s/Thin Italic/Italic/Th"%FAMILYNAME,
                  italic=True, stemWidth=80)
proj.generateFont(Mix([th, rg], 0.45), "%s/Light Italic/Italic/Lt"%FAMILYNAME,
                  italic=True, stemWidth=120)
proj.generateFont(Mix([th, rg], RPoint(0.90, 0.92)),
                  "%s/Italic/Italic/Rg"%FAMILYNAME, italic=True, stemWidth=185)
proj.generateFont(Mix([rg, bd], 0.35), "%s/Medium Italic/Italic/Lt"%FAMILYNAME,
                  italic=True, stemWidth=230)
proj.generateFont(Mix([rg, bd], RPoint(0.73, 0.73)),
                  "%s/Bold Italic/Bold Italic/Rg"%FAMILYNAME,
                  italic=True, stemWidth=290)
proj.generateFont(Mix([rg, bd], RPoint(1.125, 1.0)),
                  "%s/Black Italic/Bold Italic/Bk"%FAMILYNAME,
                  italic=True, stemWidth=290)

# Condensed masters: condense Thin, then derive the other condensed masters
# by re-applying the Regular/Bold deltas on top of it.
thcn1 = Master(condenseFont(th.font, .84, 40))
cn1 = Master(rg.ffont.addDiff(thcn1.ffont, th.ffont))
bdcn1 = Master(bd.ffont.addDiff(thcn1.ffont, th.ffont))

proj.generateFont(Mix([thcn1, cn1], RPoint(0.45, 0.47)),
                  "%s Condensed/Light/Regular/Lt"%FAMILYNAME,
                  swapSuffixes=[".cn"])
proj.generateFont(Mix([thcn1, cn1], RPoint(0.9, 0.92)),
                  "%s Condensed/Regular/Regular/Rg"%FAMILYNAME,
                  swapSuffixes=[".cn"])
proj.generateFont(Mix([cn1, bdcn1], RPoint(0.75, 0.75)),
                  "%s Condensed/Bold/Bold/Rg"%FAMILYNAME,
                  swapSuffixes=[".cn"])
proj.generateFont(Mix([thcn1, cn1], RPoint(0.45, 0.47)),
                  "%s Condensed/Light Italic/Italic/Lt"%FAMILYNAME,
                  italic=True, swapSuffixes=[".cn"], stemWidth=120)
proj.generateFont(Mix([thcn1, cn1], RPoint(0.9, 0.92)),
                  "%s Condensed/Italic/Italic/Rg"%FAMILYNAME,
                  italic=True, swapSuffixes=[".cn"], stemWidth=185)
proj.generateFont(Mix([cn1, bdcn1], RPoint(0.75, 0.75)),
                  "%s Condensed/Bold Italic/Bold Italic/Rg"%FAMILYNAME,
                  italic=True, swapSuffixes=[".cn"], stemWidth=240)

sys.exit(0)
| supriyantomaftuh/roboto | scripts/build-v2.py | Python | apache-2.0 | 5,738 |
#!/usr/bin/env python3
'''
@author Michele Tomaiuolo - http://www.ce.unipr.it/people/tomamic
@license This software is free - http://www.gnu.org/licenses/gpl.html
'''
import sys; sys.path.append('../examples/')
import g2d
from random import choice, randrange
from actor import Actor, Arena
class Ball(Actor):
    """A bouncing ball that reflects off the arena edges and walls."""

    W, H = 20, 20
    SPEED = 5

    def __init__(self, x, y):
        self._x, self._y = x, y
        self._dx, self._dy = self.SPEED, self.SPEED

    def move(self, arena):
        """Advance one frame, reversing direction at the arena borders."""
        width, height = arena.size()
        next_x = self._x + self._dx
        next_y = self._y + self._dy
        if next_x < 0 or next_x > width - self.W:
            self._dx = -self._dx
        if next_y < 0 or next_y > height - self.H:
            self._dy = -self._dy
        self._x += self._dx
        self._y += self._dy

    def collide(self, other, arena):
        """If *other* is a wall, push the ball out through its nearest side."""
        if not isinstance(other, Wall):
            return
        bx, by = self.pos()
        bw, bh = self.size()
        wx, wy = other.pos()
        ww, wh = other.size()
        # Candidate displacements that would place the ball flush against
        # the wall's left, right, top or bottom border.
        shifts = [(wx - bw - bx, 0), (wx + ww - bx, 0),
                  (0, wy - bh - by), (0, wy + wh - by)]
        dx, dy = min(shifts, key=lambda s: abs(s[0] + s[1]))
        self._x += dx
        self._y += dy

    def pos(self):
        """Top-left corner of the ball."""
        return self._x, self._y

    def size(self):
        return self.W, self.H

    def sprite(self):
        return 0, 0
class Wall(Actor):
    """An immobile rectangular obstacle."""

    def __init__(self, x, y, w, h):
        self._x = x
        self._y = y
        self._w = w
        self._h = h

    def move(self, arena):
        # Walls never move.
        pass

    def collide(self, other, arena):
        # Walls do not react; the ball repositions itself on collision.
        pass

    def pos(self):
        return self._x, self._y

    def size(self):
        return self._w, self._h

    def sprite(self):
        return 0, 0
def tick():
    """Advance the game by one frame and redraw every actor."""
    arena.tick()  # Game logic
    g2d.clear_canvas()
    for actor in arena.actors():
        if isinstance(actor, Wall):
            g2d.fill_rect(actor.pos(), actor.size())
        else:
            g2d.draw_image_clip("../examples/sprites.png", actor.pos(),
                                actor.sprite(), actor.size())
def main():
    # ``arena`` is a module global so that tick() can reach it.
    global arena, sprites
    # Small arena with two balls and one wall between them; spawn order
    # determines the order actors are processed each tick.
    arena = Arena((320, 240))
    arena.spawn(Ball(40, 80))
    arena.spawn(Ball(85, 40))
    arena.spawn(Wall(115, 80, 100, 20))
    g2d.init_canvas(arena.size())
    g2d.main_loop(tick)

main()
| tomamic/fondinfo | exercises/e5_2016_2_wall_g2d.py | Python | gpl-3.0 | 2,373 |
"""The exceptions used by Home Assistant."""
from typing import Optional, Tuple, TYPE_CHECKING
import jinja2
# pylint: disable=using-constant-test
if TYPE_CHECKING:
# pylint: disable=unused-import
from .core import Context # noqa
# Root of the Home Assistant exception hierarchy; all custom errors below
# derive from it so callers can catch HomeAssistantError generically.
class HomeAssistantError(Exception):
    """General Home Assistant exception occurred."""


class InvalidEntityFormatError(HomeAssistantError):
    """When an invalid formatted entity is encountered."""


class NoEntitySpecifiedError(HomeAssistantError):
    """When no entity is specified."""


class TemplateError(HomeAssistantError):
    """Error during template rendering."""

    def __init__(self, exception: jinja2.TemplateError) -> None:
        """Init the error."""
        # Wrap the original Jinja2 error, keeping its class name in the
        # message for easier debugging.
        super().__init__('{}: {}'.format(exception.__class__.__name__,
                                         exception))


class PlatformNotReady(HomeAssistantError):
    """Error to indicate that platform is not ready."""


class ConfigEntryNotReady(HomeAssistantError):
    """Error to indicate that config entry is not ready."""


class InvalidStateError(HomeAssistantError):
    """When an invalid state is encountered."""
class Unauthorized(HomeAssistantError):
    """When an action is unauthorized."""

    # NOTE(review): Optional[Tuple[str]] types a 1-tuple; if several
    # permissions are possible this should be Tuple[str, ...] — confirm.
    def __init__(self, context: Optional['Context'] = None,
                 user_id: Optional[str] = None,
                 entity_id: Optional[str] = None,
                 config_entry_id: Optional[str] = None,
                 perm_category: Optional[str] = None,
                 permission: Optional[Tuple[str]] = None) -> None:
        """Unauthorized error."""
        super().__init__(self.__class__.__name__)
        self.context = context
        self.user_id = user_id
        self.entity_id = entity_id
        self.config_entry_id = config_entry_id
        # Not all actions have an ID (like adding config entry)
        # We then use this fallback to know what category was unauth
        self.perm_category = perm_category
        self.permission = permission


class UnknownUser(Unauthorized):
    """When call is made with user ID that doesn't exist."""
class ServiceNotFound(HomeAssistantError):
    """Raised when a service is not found."""

    def __init__(self, domain: str, service: str) -> None:
        """Initialize error."""
        # Pass only the message to the base class.  The original code also
        # passed ``self`` as the first argument, which put the exception
        # instance itself into ``args`` and broke repr/pickling.
        super().__init__(
            "Service {}.{} not found".format(domain, service))
        self.domain = domain
        self.service = service

    def __str__(self) -> str:
        """Return string representation."""
        return "Unable to find service {}/{}".format(self.domain, self.service)
| aequitas/home-assistant | homeassistant/exceptions.py | Python | apache-2.0 | 2,598 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models
# Extend res.partner with phone/mobile normalization from
# phone.validation.mixin (comments only: Odoo may use a class docstring as
# the model description, so none is added here).
class Contact(models.Model):
    _name = 'res.partner'
    _inherit = ['res.partner', 'phone.validation.mixin']

    # Re-format whenever the number itself, or a field that drives the
    # expected region (country/company), changes.
    @api.onchange('phone', 'country_id', 'company_id')
    def _onchange_phone_validation(self):
        if self.phone:
            self.phone = self.phone_format(self.phone)

    @api.onchange('mobile', 'country_id', 'company_id')
    def _onchange_mobile_validation(self):
        if self.mobile:
            self.mobile = self.phone_format(self.mobile)
| t3dev/odoo | addons/crm_phone_validation/models/res_partner.py | Python | gpl-3.0 | 600 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 7 Feb 2014
Copyright © 2014 Kimon Tsitsikas, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division
import Pyro4
import copy
import logging
from odemis import model
from odemis.driver import simsem
import os
import pickle
import threading
import time
import unittest
from unittest.case import skip
logging.getLogger().setLevel(logging.DEBUG)

# arguments used for the creation of basic components
CONFIG_SED = {"name": "sed", "role": "sed"}
CONFIG_BSD = {"name": "bsd", "role": "bsd"}
CONFIG_SCANNER = {"name": "scanner", "role": "ebeam"}
CONFIG_FOCUS = {"name": "focus", "role": "ebeam-focus"}
# Simulated SEM fed by a fake image; the short drift period exercises the
# drift-handling code paths quickly during the tests.
CONFIG_SEM = {"name": "sem", "role": "sem", "image": "simsem-fake-output.h5",
              "drift_period": 0.1,
              "children": {"detector0": CONFIG_SED, "scanner": CONFIG_SCANNER,
                           "focus": CONFIG_FOCUS}
              }
class TestSEMStatic(unittest.TestCase):
    """
    Tests which don't need a SEM component ready
    """

    def test_creation(self):
        """
        Doesn't even try to acquire an image, just create and delete components
        """
        sem = simsem.SimSEM(**CONFIG_SEM)
        self.assertEqual(len(sem.children.value), 3)

        # Locate the children by their configured names.
        for child in sem.children.value:
            if child.name == CONFIG_SED["name"]:
                sed = child
            elif child.name == CONFIG_SCANNER["name"]:
                scanner = child

        self.assertEqual(len(scanner.resolution.value), 2)
        self.assertIsInstance(sed.data, model.DataFlow)

        self.assertTrue(sem.selfTest(), "SEM self test failed.")
        sem.terminate()

    def test_error(self):
        # A nonsense device path must make instantiation fail.
        wrong_config = copy.deepcopy(CONFIG_SEM)
        wrong_config["device"] = "/dev/comdeeeee"
        self.assertRaises(Exception, simsem.SimSEM, **wrong_config)

        # Invalid scanner channels must also make instantiation fail.
        wrong_config = copy.deepcopy(CONFIG_SEM)
        wrong_config["children"]["scanner"]["channels"] = [1, 1]
        self.assertRaises(Exception, simsem.SimSEM, **wrong_config)

    def test_pickle(self):
        # The component must survive a Pyro4 pickle round-trip.
        try:
            os.remove("testds")
        except OSError:
            pass
        daemon = Pyro4.Daemon(unixsocket="testds")

        sem = simsem.SimSEM(daemon=daemon, **CONFIG_SEM)

        dump = pickle.dumps(sem, pickle.HIGHEST_PROTOCOL)
        # print "dump size is", len(dump)
        sem_unpickled = pickle.loads(dump)
        self.assertIsInstance(sem_unpickled.children, model.VigilantAttributeBase)
        self.assertEqual(sem_unpickled.name, sem.name)
        sem.terminate()
        daemon.shutdown()
class TestSEM(unittest.TestCase):
    """
    Tests which can share one SEM device
    """

    @classmethod
    def setUpClass(cls):
        cls.sem = simsem.SimSEM(**CONFIG_SEM)

        # Locate the child components by their configured names.
        for child in cls.sem.children.value:
            if child.name == CONFIG_SED["name"]:
                cls.sed = child
            elif child.name == CONFIG_SCANNER["name"]:
                cls.scanner = child
            elif child.name == CONFIG_FOCUS["name"]:
                cls.focus = child

    @classmethod
    def tearDownClass(cls):
        cls.sem.terminate()
        time.sleep(3)

    def setUp(self):
        # reset resolution and dwellTime
        self.scanner.scale.value = (1, 1)
        self.scanner.resolution.value = (512, 256)
        self.sed.bpp.value = max(self.sed.bpp.choices)
        self.size = self.scanner.resolution.value
        self.scanner.dwellTime.value = self.scanner.dwellTime.range[0]
        self.acq_dates = (set(), set()) # 2 sets of dates, one for each receiver
        self.acq_done = threading.Event()

    def tearDown(self):
#        print gc.get_referrers(self.camera)
#        gc.collect()
        pass

    def assertTupleAlmostEqual(self, first, second, places=None, msg=None, delta=None):
        """
        check two tuples are almost equal (value by value)
        """
        for f, s in zip(first, second):
            self.assertAlmostEqual(f, s, places=places, msg=msg, delta=delta)

    def compute_expected_duration(self):
        # Estimated scan time: per-pixel dwell plus a per-line settle time.
        dwell = self.scanner.dwellTime.value
        settle = 5.e-6
        size = self.scanner.resolution.value
        return size[0] * size[1] * dwell + size[1] * settle

    def test_acquire(self):
        self.scanner.dwellTime.value = 10e-6 # s
        expected_duration = self.compute_expected_duration()

        start = time.time()
        im = self.sed.data.get()
        duration = time.time() - start

        # Image shape is (height, width), so the resolution is reversed.
        self.assertEqual(im.shape, self.size[::-1])
        self.assertGreaterEqual(duration, expected_duration, "Error execution took %f s, less than exposure time %d." % (duration, expected_duration))
        self.assertIn(model.MD_DWELL_TIME, im.metadata)

    def test_acquire_8bpp(self):
        self.sed.bpp.value = 8
        self.scanner.dwellTime.value = 10e-6 # s
        expected_duration = self.compute_expected_duration()

        start = time.time()
        im = self.sed.data.get()
        duration = time.time() - start

        self.assertEqual(im.shape, self.size[::-1])
        self.assertGreaterEqual(duration, expected_duration, "Error execution took %f s, less than exposure time %d." % (duration, expected_duration))
        self.assertIn(model.MD_DWELL_TIME, im.metadata)
        self.assertEqual(im.metadata[model.MD_BPP], 8)

    def test_hfv(self):
        # Halving the horizontal field of view must halve the pixel size.
        orig_pxs = self.scanner.pixelSize.value
        orig_hfv = self.scanner.horizontalFoV.value
        self.scanner.horizontalFoV.value = orig_hfv / 2

        self.assertAlmostEqual(orig_pxs[0] / 2, self.scanner.pixelSize.value[0])

    def test_small_res(self):
        # Sweep scale/resolution combinations, including tiny images.
        for i in range(8):
            s = 1 + i * 1.1
            for j in range(5):
                r = int(2 ** j * 1.1)
                self.scanner.scale.value = (s, s)
                self.scanner.resolution.value = (r, r)
                im = self.sed.data.get()

                self.assertEqual(im.shape, (r, r),
                                 "Scale = %g, res = %s gives shape %s" % (s, (r, r), im.shape)
                                 )

    def test_roi(self):
        """
        check that .translation and .scale work
        """
        # First, test simple behaviour on the VA
        # max resolution
        max_res = self.scanner.resolution.range[1]
        self.scanner.scale.value = (1, 1)
        self.scanner.resolution.value = max_res
        self.scanner.translation.value = (-1, 1) # will be set back to 0,0 as it cannot move
        self.assertEqual(self.scanner.translation.value, (0, 0))

        # scale up
        self.scanner.scale.value = (16, 16)
        exp_res = (max_res[0] // 16, max_res[1] // 16)
        self.assertTupleAlmostEqual(self.scanner.resolution.value, exp_res)
        self.scanner.translation.value = (-1, 1)
        self.assertEqual(self.scanner.translation.value, (0, 0))

        # shift
        exp_res = (max_res[0] // 32, max_res[1] // 32)
        self.scanner.resolution.value = exp_res
        self.scanner.translation.value = (-1, 1)
        self.assertTupleAlmostEqual(self.scanner.resolution.value, exp_res)
        self.assertEqual(self.scanner.translation.value, (-1, 1))

        # change scale to some float
        self.scanner.resolution.value = (max_res[0] // 16, max_res[1] // 16)
        self.scanner.scale.value = (1.5, 2.3)
        exp_res = (max_res[0] // 1.5, max_res[1] // 2.3)
        self.assertTupleAlmostEqual(self.scanner.resolution.value, exp_res)
        self.assertEqual(self.scanner.translation.value, (0, 0))

        self.scanner.scale.value = (1, 1)
        self.assertTupleAlmostEqual(self.scanner.resolution.value, max_res, delta=1.1)
        self.assertEqual(self.scanner.translation.value, (0, 0))

        # Then, check metadata fits with the expectations
        center = (1e3, -2e3) #m
        # simulate the information on the position (normally from the mdupdater)
        self.scanner.updateMetadata({model.MD_POS: center})

        self.scanner.resolution.value = max_res
        self.scanner.scale.value = (16, 16)
        self.scanner.dwellTime.value = self.scanner.dwellTime.range[0]

        # normal acquisition
        im = self.sed.data.get()
        self.assertEqual(im.shape, self.scanner.resolution.value[-1::-1])
        self.assertTupleAlmostEqual(im.metadata[model.MD_POS], center)

        # shift a bit
        # reduce the size of the image so that we can have translation
        self.scanner.resolution.value = (max_res[0] // 32, max_res[1] // 32)
        self.scanner.translation.value = (-1.26, 10) # px
        pxs = self.scanner.pixelSize.value
        exp_pos = (center[0] + (-1.26 * pxs[0]),
                   center[1] - (10 * pxs[1])) # because translation Y is opposite from physical one

        im = self.sed.data.get()
        self.assertEqual(im.shape, self.scanner.resolution.value[-1::-1])
        self.assertTupleAlmostEqual(im.metadata[model.MD_POS], exp_pos)

        # only one point
        self.scanner.resolution.value = (1,1)
        im = self.sed.data.get()
        self.assertEqual(im.shape, self.scanner.resolution.value[-1::-1])
        self.assertTupleAlmostEqual(im.metadata[model.MD_POS], exp_pos)

    @skip("faster")
    def test_acquire_high_osr(self):
        """
        small resolution, but large osr, to force acquisition not by whole array
        """
        self.scanner.resolution.value = (256, 200)
        self.size = self.scanner.resolution.value
        self.scanner.dwellTime.value = self.scanner.dwellTime.range[0] * 1000
        expected_duration = self.compute_expected_duration()  # about 1 min

        start = time.time()
        im = self.sed.data.get()
        duration = time.time() - start

        self.assertEqual(im.shape, self.size[-1:-3:-1])
        self.assertGreaterEqual(duration, expected_duration, "Error execution took %f s, less than exposure time %d." % (duration, expected_duration))
        self.assertIn(model.MD_DWELL_TIME, im.metadata)

    def test_long_dwell_time(self):
        """
        one pixel only, but long dwell time (> 4s), which means it uses
        duplication rate.
        """
        self.scanner.resolution.value = self.scanner.resolution.range[0]
        self.size = self.scanner.resolution.value
        self.scanner.dwellTime.value = 10 # DPR should be 3
        expected_duration = self.compute_expected_duration() # same as dwell time

        start = time.time()
        im = self.sed.data.get()
        duration = time.time() - start

        self.assertEqual(im.shape, self.size[::-1])
        self.assertGreaterEqual(duration, expected_duration, "Error execution took %f s, less than exposure time %d." % (duration, expected_duration))
        self.assertIn(model.MD_DWELL_TIME, im.metadata)

    def test_acquire_long_short(self):
        """
        test being able to cancel image acquisition if dwell time is too long
        """
        self.scanner.resolution.value = (256, 200)
        self.size = self.scanner.resolution.value
        self.scanner.dwellTime.value = self.scanner.dwellTime.range[0] * 100
        expected_duration_l = self.compute_expected_duration() # about 5 s

        self.left = 1
        start = time.time()

        # acquire one long, and change to a short time
        self.sed.data.subscribe(self.receive_image)
        # time.sleep(0.1) # make sure it has started
        self.scanner.dwellTime.value = self.scanner.dwellTime.range[0] # shorten
        expected_duration_s = self.compute_expected_duration()
        # unsub/sub should always work, as long as there is only one subscriber
        self.sed.data.unsubscribe(self.receive_image)
        self.sed.data.subscribe(self.receive_image)

        self.acq_done.wait(2 + expected_duration_l * 1.1)
        duration = time.time() - start

        self.assertTrue(self.acq_done.is_set())
        self.assertGreaterEqual(duration, expected_duration_s, "Error execution took %f s, less than exposure time %f." % (duration, expected_duration_s))
        self.assertLess(duration, expected_duration_l, "Execution took %f s, as much as the long exposure time %f." % (duration, expected_duration_l))

    def test_acquire_flow(self):
        expected_duration = self.compute_expected_duration()

        number = 5
        self.left = number
        self.sed.data.subscribe(self.receive_image)

        self.acq_done.wait(number * (2 + expected_duration * 1.1)) # 2s per image should be more than enough in any case

        self.assertEqual(self.left, 0)

    def test_acquire_with_va(self):
        """
        Change some settings before and while acquiring
        """
        dwell = self.scanner.dwellTime.range[0] * 2
        self.scanner.dwellTime.value = dwell
        self.scanner.resolution.value = self.scanner.resolution.range[1] # test big image
        self.size = self.scanner.resolution.value
        expected_duration = self.compute_expected_duration()

        number = 3
        self.left = number
        self.sed.data.subscribe(self.receive_image)

        # change the attribute
        time.sleep(expected_duration)
        dwell = self.scanner.dwellTime.range[0]
        self.scanner.dwellTime.value = dwell
        expected_duration = self.compute_expected_duration()

        self.acq_done.wait(number * (2 + expected_duration * 1.1)) # 2s per image should be more than enough in any case

        self.sed.data.unsubscribe(self.receive_image) # just in case it failed
        self.assertEqual(self.left, 0)

    def test_df_fast_sub_unsub(self):
        """
        Test the dataflow on a very fast cycle subscribing/unsubscribing
        SEMComedi had a bug causing the threads not to start again
        """
        self.scanner.dwellTime.value = self.scanner.dwellTime.range[0]
        number = 10
        expected_duration = self.compute_expected_duration()

        self.left = 10000 # don't unsubscribe automatically

        for i in range(number):
            self.sed.data.subscribe(self.receive_image)
            time.sleep(0.001 * i)
            self.sed.data.unsubscribe(self.receive_image)

        # now this one should work
        self.sed.data.subscribe(self.receive_image)
        time.sleep(expected_duration * 2) # make sure we received at least one image
        self.sed.data.unsubscribe(self.receive_image)

        self.assertLessEqual(self.left, 10000 - 1)

    def test_df_alternate_sub_unsub(self):
        """
        Test the dataflow on a quick cycle subscribing/unsubscribing
        Andorcam3 had a real bug causing deadlock in this scenario
        """
        self.scanner.dwellTime.value = 10e-6
        number = 5
        expected_duration = self.compute_expected_duration()

        self.left = 10000 + number # don't unsubscribe automatically

        for i in range(number):
            self.sed.data.subscribe(self.receive_image)
            time.sleep(expected_duration * 1.2) # make sure we received at least one image
            self.sed.data.unsubscribe(self.receive_image)

        # if it has acquired a least 5 pictures we are already happy
        self.assertLessEqual(self.left, 10000)

    def receive_image(self, dataflow, image):
        """
        callback for df of test_acquire_flow()
        """
        self.assertEqual(image.shape, self.size[-1:-3:-1])
        self.assertIn(model.MD_DWELL_TIME, image.metadata)
        self.acq_dates[0].add(image.metadata[model.MD_ACQ_DATE])
#        print "Received an image"
        self.left -= 1
        if self.left <= 0:
            dataflow.unsubscribe(self.receive_image)
            self.acq_done.set()

    def test_focus(self):
        """
        Check it's possible to change the focus
        """
        pos = self.focus.position.value
        f = self.focus.moveRel({"z": 1e-3}) # 1 mm
        f.result()
        self.assertNotEqual(self.focus.position.value, pos)
        self.sed.data.get()

        f = self.focus.moveRel({"z":-10e-3}) # 10 mm
        f.result()
        self.assertNotEqual(self.focus.position.value, pos)
        self.sed.data.get()

        # restore original position
        f = self.focus.moveAbs(pos)
        f.result()
        self.assertEqual(self.focus.position.value, pos)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| gstiebler/odemis | src/odemis/driver/test/simsem_test.py | Python | gpl-2.0 | 16,784 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import subprocess
import time
import uuid
import pytest
from google.cloud import storage
# Seconds to let the submitted job run before checking for artifacts.
WAIT_TIME = 180

# Required environment; a missing variable fails fast at import time.
ARTIFACTS_BUCKET = os.environ['EXAMPLE_ZOO_ARTIFACTS_BUCKET']
PROJECT_ID = os.environ['EXAMPLE_ZOO_PROJECT_ID']
SUBMIT_SCRIPTS = ['submit_27.sh', 'submit_35.sh']
@pytest.fixture(scope='session')
def gcs_bucket_prefix():
    # Create a temporary prefix for storing the artifacts.
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(ARTIFACTS_BUCKET)
    prefix = os.path.join('example_zoo_artifacts', str(uuid.uuid4()))

    yield (bucket, prefix)

    # Clean up after sleeping for another minute.
    # NOTE(review): the comment says "a minute" but this sleeps 120 s —
    # confirm the intended grace period before deleting artifacts.
    time.sleep(120)
    for blob in bucket.list_blobs(prefix=prefix):
        blob.delete()
@pytest.mark.parametrize('submit_script', SUBMIT_SCRIPTS)
def test_latent_dirichlet_allocation_edward2(gcs_bucket_prefix, submit_script):
    bucket, prefix = gcs_bucket_prefix

    # Point the submit script at the session-scoped artifacts prefix.
    subprocess_env = os.environ.copy()
    subprocess_env['EXAMPLE_ZOO_ARTIFACTS_BUCKET'] = 'gs://{}/{}'.format(os.environ['EXAMPLE_ZOO_ARTIFACTS_BUCKET'], prefix)

    out = subprocess.check_output(['bash', submit_script], env=subprocess_env)
    out_str = out.decode('ascii')
    assert 'QUEUED' in out_str, 'Job submission failed: {}'.format(out_str)

    # Get jobId so we can cancel the job easily.
    # NOTE(review): re.match only matches at the start of the string, so this
    # relies on "jobId: ..." being the first thing printed; re.search would
    # be more robust — confirm the submit script's output format.
    job_id = re.match(r'jobId: (.+)\b', out_str).group(1)

    # Give the job time to produce checkpoint artifacts, then cancel it so
    # it does not keep consuming quota.
    time.sleep(WAIT_TIME)

    # Cancel the job.
    subprocess.check_call(['gcloud', 'ai-platform', 'jobs', 'cancel', job_id, '--project', PROJECT_ID])

    blob_names = [blob.name for blob in bucket.list_blobs(prefix=prefix)]
    out_str = ' '.join(blob_names)
    assert '.data-00000-of-00001' in out_str, 'Artifact ".data-00000-of-00001" not found in bucket {} with prefix {} after {} seconds.'.format(bucket, prefix, WAIT_TIME)
| GoogleCloudPlatform/ml-on-gcp | example_zoo/tensorflow/probability/latent_dirichlet_allocation_edward2/cmle_latent_dirichlet_allocation_edward2_test.py | Python | apache-2.0 | 2,399 |
#!/usr/bin/python
import glob
import os
import shutil
import subprocess
import sys
import yaml
def create_role(role):
    """Create a skeleton Ansible role via ``ansible-galaxy init``."""
    command = 'ansible-galaxy init {}'.format(role).split()
    output = subprocess.check_output(command)
    if not output.strip().endswith('created successfully'):
        raise Exception('could not create role "{}"'.format(role))
def get_metadata(role):
    """Load a role's ``meta/main.yml``; return {} if the file is missing."""
    try:
        # Use a context manager so the handle is closed deterministically;
        # the original code never closed it.
        with open(os.path.join(role, 'meta/main.yml')) as main:
            # SECURITY(review): yaml.load without an explicit Loader can
            # construct arbitrary objects; for untrusted roles prefer
            # yaml.safe_load.  Kept as-is to preserve behavior.
            return yaml.load(main)
    except IOError:
        return {}
def ensure_meta(role):
    """Ensure the role has a meta directory"""
    meta_dir = os.path.join(role, 'meta')
    try:
        os.makedirs(meta_dir)
    except OSError:
        # Directory already exists (or is not creatable) — same silent
        # behavior as before.
        pass
def set_metadata(role, metadata):
    """Write *metadata* to the role's meta/main.yml via an atomic rename."""
    ensure_meta(role)
    final_path = os.path.join(role, 'meta/main.yml')
    tmp_path = final_path + '.new'
    # Write to a temporary file first so readers never see a partial file.
    with open(tmp_path, 'w') as handle:
        yaml.dump(metadata, handle, default_flow_style=False,
                  explicit_start=True)
    os.rename(tmp_path, final_path)
def add_dependency(src_role, target_role):
    """Add metadata saying that 'target_role' depends on 'src_role'"""
    metadata = get_metadata(target_role)
    dependency = os.path.join(target_role, 'roles', src_role)
    metadata.setdefault('dependencies', []).append(dependency)
    set_metadata(target_role, metadata)
def sub_roles(role):
    """Return the paths of the roles vendored under *role*/roles.

    Returns [] when the directory does not exist: glob.glob simply yields
    no matches for a non-existent path, so the original try/except OSError
    was dead code.
    """
    return glob.glob(os.path.join(role, 'roles/*'))
def fix_dependency(role, for_destination):
    """Fix the sub-role dependency.

    Dependency on a sub-role has to be changed once we move the base
    role: any dependency path inside the role itself gets prefixed with
    its new location under *for_destination*.
    """
    metadata = get_metadata(role)
    old_deps = metadata.setdefault('dependencies', [])
    metadata['dependencies'] = [
        os.path.join(for_destination, 'roles', dep) if dep.startswith(role)
        else dep
        for dep in old_deps
    ]
    set_metadata(role, metadata)
def fix_dependencies(src_role, for_destination):
    """Recursively fix dependency paths: children first, then the role itself."""
    for child in sub_roles(src_role):
        fix_dependencies(child, for_destination)
    fix_dependency(src_role, for_destination)
def move(src_role, target_role, copy=False):
    """Vendor *src_role* under *target_role*/roles and record the dependency.

    With copy=True the source tree is duplicated instead of moved.
    """
    destination = os.path.join(target_role, 'roles', src_role)
    try:
        os.makedirs(os.path.join(target_role, 'roles'))
    except OSError:
        pass  # roles/ directory already exists
    fix_dependencies(src_role, for_destination=target_role)
    if copy:
        shutil.copytree(src_role, destination)
    else:
        shutil.move(src_role, destination)
    add_dependency(src_role, target_role)
def concat(roles, into, copy=False):
    """Create role *into* and vendor every role in *roles* inside it."""
    create_role(into)
    for each in roles:
        move(each, target_role=into, copy=copy)
def test():
    # Integration self-test: needs ansible-galaxy on PATH and writes
    # throwaway roles in the current directory.
    roles = ['foo', 'bar', 'spam']
    try:
        for role in roles:
            create_role(role)

        move('foo', 'bar')
        assert get_metadata('bar')['dependencies'] == ['bar/roles/foo']

        move('bar', 'spam')
        # The nested dependency paths must be rewritten for the new home.
        assert get_metadata('spam')['dependencies'] == ['spam/roles/bar']
        assert get_metadata('spam/roles/bar')['dependencies'] == ['spam/roles/bar/roles/foo']
    finally:
        for role in roles:
            shutil.rmtree(role, ignore_errors=True)
def main():
    # NOTE(review): roles_path is a hard-coded placeholder, so the chdir
    # branch is currently dead code — confirm whether it should come from
    # the CLI or an environment variable.
    roles_path = None
    if roles_path is not None:
        os.chdir(roles_path)
    # Usage: concat_roles.py <role1> <role2> <target>
    concat([sys.argv[1], sys.argv[2]], into=sys.argv[3])


if __name__ == '__main__':
    main()
| waltermoreira/dockeransible | app_builder/app_builder_image/concat_roles.py | Python | mit | 3,259 |
import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_jobs, h2o_glm
from h2o_test import verboseprint, dump_json, OutputObj
from tabulate import tabulate
from h2o_xl import Key, Assign
def define_params():
    """Return the randomizable GLM parameter space for this test.

    Each entry maps a GLM parameter name to the candidate values that
    pickRandGlmParams() may choose from; None means "leave unset".

    Fix: the original dict literal listed the 'standardize' key twice
    (with identical values); the duplicate has been removed.
    """
    paramDict = {
        # FIX! when is this needed? redundant for binomial?
        'balance_classes': [True, False, None],
        'class_sampling_factors': [0.1, 0.2, "0.1, 0.2", None],
        'max_after_balance_size': [100.0, 1000.0, None],
        # 'solver': ['ADMM', 'L_BFGS', None],
        'solver': ['L_BFGS', None],
        'max_iterations': [1, 3, 15, None],
        'drop_na20_cols': [None, 0, 1],
        'standardize': [None, 0, 1],
        'nlambdas': [None, 1, 2, 5],  # number of lambdas to be used in a search
        'lambda_min_ratio': [None, .1, 0.9],  # ratio of lambda max. Evidently can't take 1 ?
        'lambda': [0, 1e-8, 1e-4, 1e-3],
        'lambda_search': [None, 0, 1],  # FIX! what if lambda is set when lambda_search=1
        # 'use_all_factor_levels': [None, 0, 1],
        'alpha': [0, 0.2, 0.4],
        'family': ['family_default', 'gaussian', 'binomial', 'poisson', None],
        'link': ['logit', 'log', 'inverse', 'tweedie', None],
        'ignored_columns': [1, '"C1"', '1,2', '"C1","C2"'],
        # 'intercept': [None, 0, 1],
        # 'non_negative': [None, 0, 1],  # require coefficients to be non-negative
        # 'variable_importances': [None, 0, 1],
    }
    return paramDict
# Python 2 h2o integration test: drives a live H2O node over REST.
class Basic(unittest.TestCase):
    def tearDown(self):
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        global SEED
        print "hardwiring seed for now"
        SEED = h2o.setup_random_seed(seed=6418304027311682180)
        h2o.init(1)

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_GLM_params_rand2(self):
        importFolderPath = "covtype"
        csvFilename = "covtype.20k.data"
        hex_key = "covtype20k.hex"
        binomial_key = "covtype20k.b.hex"
        b = Key(hex_key)

        # Parse the same file twice: once as-is, once to be binomial-ized.
        csvPathname = importFolderPath + "/" + csvFilename
        parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=hex_key,
            check_header=1, timeoutSecs=180, doSummary=False)

        ## columnTypeDict = {54: 'Enum'}
        columnTypeDict = None
        parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=binomial_key,
            columnTypeDict=columnTypeDict,
            check_header=1, timeoutSecs=180, doSummary=False)

        # don't have to make it enum, if 0/1 (can't operate on enums like this)
        # make 1-7 go to 0-6. 0 isn't there.
        Assign(b[:,54], b[:,54]-1)
        # make 1 thru 6 go to 1
        Assign(b[:,54], b[:,54]!=0)
        # now we have just 0 and 1

        pA = h2o_cmd.ParseObj(parseResult)
        iA = h2o_cmd.InspectObj(pA.parse_key)
        parse_key = pA.parse_key
        numRows = iA.numRows
        numCols = iA.numCols
        labelList = iA.labelList

        expected = []
        allowedDelta = 0

        # loop, to see if we get same centers
        labelListUsed = list(labelList)
        numColsUsed = numCols

        paramDict = define_params()
        for trial in range(5):
            # family [u'gaussian', u'binomial', u'poisson', u'gamma', u'tweedie']
            # link [u'family_default', u'identity', u'logit', u'log', u'inverse', u'tweedie']
            # can we do classification with probabilities?
            # are only lambda and alpha grid searchable?

            # params is mutable. This is default.
            parameters = {
                'response_column': 'C55',
                'alpha': 0.1,
                # 'lambda': 1e-4,
                'lambda': 0,
            }
            h2o_glm.pickRandGlmParams(paramDict, parameters)

            # Binomial families need the 0/1 frame built above.
            if 'family' not in parameters or parameters['family']=='binomial':
                bHack = binomial_key
            else:
                bHack = hex_key

            co = h2o_cmd.runSummary(key=binomial_key, column=54)
            print "binomial_key summary:", co.label, co.type, co.missing_count, co.domain, sum(co.histogram_bins)
            co = h2o_cmd.runSummary(key=hex_key, column=54)
            print "hex_key summary:", co.label, co.type, co.missing_count, co.domain, sum(co.histogram_bins)

            # fix stupid params
            # (these REST params must be sent as bracketed list strings)
            fixList = ['alpha', 'lambda', 'ignored_columns', 'class_sampling_factors']
            for f in fixList:
                if f in parameters:
                    parameters[f] = "[%s]" % parameters[f]

            model_key = 'rand_glm.hex'
            bmResult = h2o.n0.build_model(
                algo='glm',
                model_id=model_key,
                training_frame=bHack,
                parameters=parameters,
                timeoutSecs=10)
            bm = OutputObj(bmResult, 'bm')

            modelResult = h2o.n0.models(key=model_key)
            model = OutputObj(modelResult['models'][0]['output'], 'model')
            h2o_glm.simpleCheckGLM(self, model, parameters, labelList, labelListUsed, allowNaN=True)

            cmmResult = h2o.n0.compute_model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
            cmm = OutputObj(cmmResult, 'cmm')

            # FIX! when is this legal
            doClassification = False
            if doClassification:
                mcms = OutputObj({'data': cmm.max_criteria_and_metric_scores.data}, 'mcms')
                m1 = mcms.data[1:]
                h0 = mcms.data[0]
                print "\nmcms", tabulate(m1, headers=h0)

            if doClassification:
                thms = OutputObj(cmm.thresholds_and_metric_scores, 'thms')
                cmms = OutputObj({'cm': cmm.confusion_matrices}, 'cmms')
                if 1==0:
                    print ""
                    for i,c in enumerate(cmms.cm):
                        print "\ncmms.cm[%s]" % i, tabulate(c)
                    print ""

            mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
            mm = OutputObj(mmResult['model_metrics'][0], 'mm')

            prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60)
            pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr')

            # h2o_cmd.runStoreView()
# Allow running this test module directly as a script.
if __name__ == '__main__':
    h2o.unit_main()
| YzPaul3/h2o-3 | py2/testdir_single_jvm/test_GLM_params_rand2.py | Python | apache-2.0 | 6,489 |
import pytest
from amoco.system.pe import PE
def test_parser_PE(samples):
    """Smoke-test: every sample file with a .exe extension parses as PE."""
    for f in samples:
        # str.endswith is clearer than slicing and behaves the same for
        # short names; the unused ``p =`` binding has been dropped.
        if f.endswith('.exe'):
            PE(f)
| chubbymaggie/amoco | tests/test_system_pe.py | Python | gpl-2.0 | 148 |
#!/usr/bin/env python
import xml.sax
import xml.sax.handler
import sys
import csv
class Handler(xml.sax.handler.ContentHandler):
    """SAX content handler that emits one CSV row per ``<marker>`` element.

    Each row contains the UTF-8 encoded values of the attributes named in
    ``cols``, in that order.
    """

    def __init__(self, csv_writer, cols):
        self.csv_writer = csv_writer
        self.cols = cols

    def startElement(self, name, attrs):
        # Only <marker> elements carry stop data; ignore everything else.
        if name != u'marker':
            return
        row = [attrs[col].encode("UTF-8") for col in self.cols]
        self.csv_writer.writerow(row)
# Attribute names to extract from each <marker> element, in output order.
cols = [u"id", u"stopid", u"lat", u"lng", u"name"]
input_filename = sys.argv[1]
# NOTE(review): binary-mode read plus manual UTF-8 encoding of csv cells is
# Python 2 style; confirm the target runtime before modernizing.
input_file = open(input_filename, "rb")
output_file = sys.stdout
csv_writer = csv.writer(output_file)
# Header row first, then one row per <marker> produced by the SAX handler.
csv_writer.writerow(map(lambda c: c.encode("UTF-8"), cols))
xml.sax.parse(input_file, Handler(csv_writer, cols))
output_file.close()
input_file.close()
| dlitz/opendatanow | support/octranspo-stops-to-csv.py | Python | gpl-3.0 | 733 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.eager import context
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.estimator.export import export as export_helpers
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.summary import summary
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import device_setter
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.training import warm_starting_util
from tensorflow.python.util import compat
from tensorflow.python.util import compat_internal
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import estimator_export
# Parameter names a user-supplied `model_fn` is allowed to declare;
# presumably consulted by `_verify_model_fn_args` (defined elsewhere in
# this module) when validating the signature -- confirm at that call site.
_VALID_MODEL_FN_ARGS = set(
    ['features', 'labels', 'mode', 'params', 'self', 'config'])
@estimator_export('estimator.Estimator')
class Estimator(object):
"""Estimator class to train and evaluate TensorFlow models.
The `Estimator` object wraps a model which is specified by a `model_fn`,
which, given inputs and a number of other parameters, returns the ops
necessary to perform training, evaluation, or predictions.
All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a
subdirectory thereof. If `model_dir` is not set, a temporary directory is
used.
The `config` argument can be passed `tf.estimator.RunConfig` object containing
information about the execution environment. It is passed on to the
`model_fn`, if the `model_fn` has a parameter named "config" (and input
functions in the same manner). If the `config` parameter is not passed, it is
instantiated by the `Estimator`. Not passing config means that defaults useful
for local execution are used. `Estimator` makes config available to the model
(for instance, to allow specialization based on the number of workers
available), and also uses some of its fields to control internals, especially
regarding checkpointing.
The `params` argument contains hyperparameters. It is passed to the
`model_fn`, if the `model_fn` has a parameter named "params", and to the input
functions in the same manner. `Estimator` only passes params along, it does
not inspect it. The structure of `params` is therefore entirely up to the
developer.
None of `Estimator`'s methods can be overridden in subclasses (its
constructor enforces this). Subclasses should use `model_fn` to configure
the base class, and may add methods implementing specialized functionality.
@compatibility(eager)
Calling methods of `Estimator` will work while eager execution is enabled.
However, the `model_fn` and `input_fn` is not executed eagerly, `Estimator`
will switch to graph model before calling all user-provided functions (incl.
hooks), so their code has to be compatible with graph mode execution. Note
that `input_fn` code using `tf.data` generally works in both graph and eager
modes.
@end_compatibility
"""
  def __init__(self, model_fn, model_dir=None, config=None, params=None,
               warm_start_from=None):
    """Constructs an `Estimator` instance.

    See @{$estimators} for more information. To warm-start an `Estimator`:

    ```python
    estimator = tf.estimator.DNNClassifier(
        feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],
        hidden_units=[1024, 512, 256],
        warm_start_from="/path/to/checkpoint/dir")
    ```

    For more details on warm-start configuration, see
    `tf.estimator.WarmStartSettings`.

    Args:
      model_fn: Model function. Follows the signature:

        * Args:
          * `features`: This is the first item returned from the `input_fn`
            passed to `train`, `evaluate`, and `predict`. This should be a
            single `tf.Tensor` or `dict` of same.
          * `labels`: This is the second item returned from the `input_fn`
            passed to `train`, `evaluate`, and `predict`. This should be a
            single `tf.Tensor` or `dict` of same (for multi-head models).
            If mode is @{tf.estimator.ModeKeys.PREDICT}, `labels=None` will
            be passed. If the `model_fn`'s signature does not accept
            `mode`, the `model_fn` must still be able to handle
            `labels=None`.
          * `mode`: Optional. Specifies if this training, evaluation or
            prediction. See `tf.estimator.ModeKeys`.
          * `params`: Optional `dict` of hyperparameters. Will receive what
            is passed to Estimator in `params` parameter. This allows
            to configure Estimators from hyper parameter tuning.
          * `config`: Optional configuration object. Will receive what is
            passed to Estimator in `config` parameter, or the default
            `config`. Allows updating things in your `model_fn` based on
            configuration such as `num_ps_replicas`, or `model_dir`.

        * Returns:
          `tf.estimator.EstimatorSpec`
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model. If `PathLike` object,
        the path will be resolved. If `None`, the model_dir in `config` will
        be used if set. If both are set, they must be same. If both are
        `None`, a temporary directory will be used.
      config: Configuration object.
      params: `dict` of hyper parameters that will be passed into `model_fn`.
        Keys are names of parameters, values are basic python types.
      warm_start_from: Optional string filepath to a checkpoint or SavedModel
        to warm-start from, or a `tf.estimator.WarmStartSettings` object to
        fully configure warm-starting. If the string filepath is provided
        instead of a `tf.estimator.WarmStartSettings`, then all variables are
        warm-started, and it is assumed that vocabularies and `tf.Tensor`
        names are unchanged.

    Raises:
      ValueError: parameters of `model_fn` don't match `params`.
      ValueError: if this is called via a subclass and if that class overrides
        a member of `Estimator`.
    """
    # Fail fast if a subclass illegally overrides an Estimator member.
    Estimator._assert_members_are_not_overridden(self)
    # Merges `model_dir` into the config (module-level helper defined
    # elsewhere in this file).
    config = maybe_overwrite_model_dir_and_session_config(config, model_dir)
    self._config = config
    # The distribute field contains an instance of DistributionStrategy.
    self._train_distribution = self._config.train_distribute
    self._eval_distribution = self._config.eval_distribute
    # Model directory.
    self._model_dir = self._config.model_dir
    self._session_config = self._config.session_config
    logging.info('Using config: %s', str(vars(self._config)))
    # An explicit config.device_fn wins over the default replica device
    # setter derived from the config.
    self._device_fn = (
        self._config.device_fn or _get_replica_device_setter(self._config))
    if model_fn is None:
      raise ValueError('model_fn must be provided to Estimator.')
    _verify_model_fn_args(model_fn, params)
    self._model_fn = model_fn
    # Deep-copied so later caller-side mutation of `params` cannot leak in.
    self._params = copy.deepcopy(params or {})
    # pylint: disable=protected-access
    self._warm_start_settings = _get_default_warm_start_settings(
        warm_start_from)
    # pylint: enable=protected-access
  @property
  def model_dir(self):
    """Returns the model directory where outputs (checkpoints, etc.) go."""
    return self._model_dir
  @property
  def config(self):
    """Returns a deep copy of the config; callers cannot mutate internals."""
    return copy.deepcopy(self._config)
  @property
  def params(self):
    """Returns a deep copy of `params`; callers cannot mutate internals."""
    return copy.deepcopy(self._params)
@property
def model_fn(self):
"""Returns the `model_fn` which is bound to `self.params`.
Returns:
The `model_fn` with following signature:
`def model_fn(features, labels, mode, config)`
"""
def public_model_fn(features, labels, mode, config):
return self._call_model_fn(features, labels, mode, config)
return public_model_fn
# TODO(ispir): support a list of names
  def get_variable_value(self, name):
    """Returns value of the variable given by name.

    Args:
      name: string or a list of string, name of the tensor.

    Returns:
      Numpy array - value of the tensor.

    Raises:
      ValueError: If the `Estimator` has not produced a checkpoint yet.
    """
    _check_checkpoint_available(self.model_dir)
    # Checkpoint reading is a graph-mode operation, even under eager.
    with context.graph_mode():
      return training.load_variable(self.model_dir, name)
  def get_variable_names(self):
    """Returns list of all variable names in this model.

    Returns:
      List of names.

    Raises:
      ValueError: If the `Estimator` has not produced a checkpoint yet.
    """
    _check_checkpoint_available(self.model_dir)
    # Checkpoint inspection is a graph-mode operation, even under eager.
    with context.graph_mode():
      return [name for name, _ in training.list_variables(self.model_dir)]
  def latest_checkpoint(self):
    """Finds the filename of the latest saved checkpoint file in `model_dir`.

    Returns:
      The full path to the latest checkpoint or `None` if no checkpoint was
      found.
    """
    # Thin delegation; the graph-mode guard covers eager contexts.
    with context.graph_mode():
      return checkpoint_management.latest_checkpoint(self.model_dir)
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
"""Trains a model given training data `input_fn`.
Args:
input_fn: A function that provides input data for training as minibatches.
See @{$premade_estimators#create_input_functions} for more information.
The function should construct and return one of the following: * A
`tf.data.Dataset` object: Outputs of `Dataset` object must be a tuple
`(features, labels)` with same constraints as below. * A tuple
`(features, labels)`: Where `features` is a `tf.Tensor` or a dictionary
of string feature name to `Tensor` and `labels` is a `Tensor` or a
dictionary of string label name to `Tensor`. Both `features` and
`labels` are consumed by `model_fn`. They should satisfy the expectation
of `model_fn` from inputs.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the training loop.
steps: Number of steps for which to train the model. If `None`, train
forever or train until `input_fn` generates the `tf.errors.OutOfRange`
error or `StopIteration` exception. `steps` works incrementally. If you
call two times `train(steps=10)` then training occurs in total 20 steps.
If `OutOfRange` or `StopIteration` occurs in the middle, training stops
before 20 steps. If you don't want to have incremental behavior please
set `max_steps` instead. If set, `max_steps` must be `None`.
max_steps: Number of total steps for which to train model. If `None`,
train forever or train until `input_fn` generates the
`tf.errors.OutOfRange` error or `StopIteration` exception. If set,
`steps` must be `None`. If `OutOfRange` or `StopIteration` occurs in the
middle, training stops before `max_steps` steps. Two calls to
`train(steps=100)` means 200 training iterations. On the other hand, two
calls to `train(max_steps=100)` means that the second call will not do
any iteration since first call did all 100 steps.
saving_listeners: list of `CheckpointSaverListener` objects. Used for
callbacks that run immediately before or after checkpoint savings.
Returns:
`self`, for chaining.
Raises:
ValueError: If both `steps` and `max_steps` are not `None`.
ValueError: If either `steps` or `max_steps <= 0`.
"""
with context.graph_mode():
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if steps is not None and steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps > 0, given: {}'.format(max_steps))
if max_steps is not None:
start_step = _load_global_step_from_checkpoint_dir(self._model_dir)
if max_steps <= start_step:
logging.info('Skipping training since max_steps has already saved.')
return self
hooks = _check_hooks_type(hooks)
hooks.extend(self._convert_train_steps_to_hooks(steps, max_steps))
saving_listeners = _check_listeners_type(saving_listeners)
loss = self._train_model(input_fn, hooks, saving_listeners)
logging.info('Loss for final step: %s.', loss)
return self
def _convert_train_steps_to_hooks(self, steps, max_steps):
"""Create hooks to run correct number of steps in training.
Args:
steps: number of steps to run during training.
max_steps: maximum number of steps to be run during training. It'll be
the maximum number of steps the model will train to after restoring
from checkpoint even across multiple estimator.train calls.
Returns:
List of hooks to be passed to the estimator.
"""
if steps is not None or max_steps is not None:
if self._train_distribution:
steps_per_run = getattr(self._train_distribution, 'steps_per_run', 1)
if steps_per_run > 1:
return [basic_session_run_hooks._MultiStepStopAtStepHook( # pylint: disable=protected-access
steps, max_steps, steps_per_run)]
return [training.StopAtStepHook(steps, max_steps)]
else:
return []
def eval_dir(self, name=None):
"""Shows the directory name where evaluation metrics are dumped.
Args:
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
separately in tensorboard.
Returns:
A string which is the path of directory contains evaluation metrics.
"""
return os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
  def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None,
               name=None):
    """Evaluates the model given evaluation data `input_fn`.

    For each step, calls `input_fn`, which returns one batch of data.
    Evaluates until:
    - `steps` batches are processed, or
    - `input_fn` raises an end-of-input exception
      (`tf.errors.OutOfRangeError` or `StopIteration`).

    Args:
      input_fn: A function that constructs the input data for evaluation. See
        @{$premade_estimators#create_input_functions} for more information.
        The function should construct and return one of the following: * A
        `tf.data.Dataset` object: Outputs of `Dataset` object must be a tuple
        `(features, labels)` with same constraints as below. * A tuple
        `(features, labels)`: Where `features` is a `tf.Tensor` or a
        dictionary of string feature name to `Tensor` and `labels` is a
        `Tensor` or a dictionary of string label name to `Tensor`. Both
        `features` and `labels` are consumed by `model_fn`. They should
        satisfy the expectation of `model_fn` from inputs.
      steps: Number of steps for which to evaluate model. If `None`, evaluates
        until `input_fn` raises an end-of-input exception.
      hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
        callbacks inside the evaluation call.
      checkpoint_path: Path of a specific checkpoint to evaluate. If `None`,
        the latest checkpoint in `model_dir` is used. If there are no
        checkpoints in `model_dir`, evaluation is run with newly initialized
        `Variables` instead of ones restored from checkpoint.
      name: Name of the evaluation if user needs to run multiple evaluations
        on different data sets, such as on training data vs test data.
        Metrics for different evaluations are saved in separate folders, and
        appear separately in tensorboard.

    Returns:
      A dict containing the evaluation metrics specified in `model_fn` keyed
      by name, as well as an entry `global_step` which contains the value of
      the global step for which this evaluation was performed.

    Raises:
      ValueError: If `steps <= 0`.
      ValueError: If no model has been trained, namely `model_dir`, or the
        given `checkpoint_path` is empty.
    """
    with context.graph_mode():
      hooks = _check_hooks_type(hooks)
      hooks.extend(self._convert_eval_steps_to_hooks(steps))
      # Check that model has been trained (if nothing has been set explicitly).
      if not checkpoint_path:
        latest_path = checkpoint_management.latest_checkpoint(self._model_dir)
        if not latest_path:
          logging.info('Could not find trained model in model_dir: {}, running '
                       'initialization to evaluate.'.format(self._model_dir))
        # May still be None here; evaluation then runs with freshly
        # initialized variables instead of a restored checkpoint.
        checkpoint_path = latest_path
      def _evaluate():
        # Builds and runs the eval graph; deferred into a closure so it can
        # execute either inside or outside the distribution strategy scope.
        (scaffold, update_op, eval_dict, all_hooks) = (
            self._evaluate_build_graph(input_fn, hooks, checkpoint_path))
        return self._evaluate_run(
            checkpoint_path=checkpoint_path,
            scaffold=scaffold,
            update_op=update_op,
            eval_dict=eval_dict,
            all_hooks=all_hooks,
            output_dir=self.eval_dir(name))
      with ops.Graph().as_default():
        # TODO(priyag): Support distributed eval on TPUs.
        if (self._eval_distribution
            and self._eval_distribution.__class__.__name__ != 'TPUStrategy'):
          with self._eval_distribution.scope():
            return _evaluate()
        else:
          return _evaluate()
def _convert_eval_steps_to_hooks(self, steps):
if steps is None:
return []
if steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
return [evaluation._StopAfterNEvalsHook(num_evals=steps)] # pylint: disable=protected-access
  def predict(self,
              input_fn,
              predict_keys=None,
              hooks=None,
              checkpoint_path=None,
              yield_single_examples=True):
    """Yields predictions for given features.

    Args:
      input_fn: A function that constructs the features. Prediction continues
        until `input_fn` raises an end-of-input exception
        (`tf.errors.OutOfRangeError` or `StopIteration`).
        See @{$premade_estimators#create_input_functions} for more
        information. The function should construct and return one of
        the following:

        * A `tf.data.Dataset` object: Outputs of `Dataset` object must have
          same constraints as below.
        * features: A `tf.Tensor` or a dictionary of string feature name to
          `Tensor`. features are consumed by `model_fn`. They should satisfy
          the expectation of `model_fn` from inputs.
        * A tuple, in which case the first item is extracted as features.

      predict_keys: list of `str`, name of the keys to predict. It is used if
        the `tf.estimator.EstimatorSpec.predictions` is a `dict`. If
        `predict_keys` is used then rest of the predictions will be filtered
        from the dictionary. If `None`, returns all.
      hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
        callbacks inside the prediction call.
      checkpoint_path: Path of a specific checkpoint to predict. If `None`,
        the latest checkpoint in `model_dir` is used. If there are no
        checkpoints in `model_dir`, prediction is run with newly initialized
        `Variables` instead of ones restored from checkpoint.
      yield_single_examples: If `False`, yields the whole batch as returned by
        the `model_fn` instead of decomposing the batch into individual
        elements. This is useful if `model_fn` returns some tensors whose
        first dimension is not equal to the batch size.

    Yields:
      Evaluated values of `predictions` tensors.

    Raises:
      ValueError: Could not find a trained model in `model_dir`.
      ValueError: If batch length of predictions is not the same and
        `yield_single_examples` is `True`.
      ValueError: If there is a conflict between `predict_keys` and
        `predictions`. For example if `predict_keys` is not `None` but
        `tf.estimator.EstimatorSpec.predictions` is not a `dict`.
    """
    with context.graph_mode():
      hooks = _check_hooks_type(hooks)
      # Check that model has been trained.
      if not checkpoint_path:
        checkpoint_path = checkpoint_management.latest_checkpoint(
            self._model_dir)
        if not checkpoint_path:
          logging.info('Could not find trained model in model_dir: {}, running '
                       'initialization to predict.'.format(self._model_dir))
      # Build a fresh PREDICT-mode graph for this call.
      with ops.Graph().as_default() as g:
        random_seed.set_random_seed(self._config.tf_random_seed)
        self._create_and_assert_global_step(g)
        features, input_hooks = self._get_features_from_input_fn(
            input_fn, model_fn_lib.ModeKeys.PREDICT)
        # labels=None in PREDICT mode; the model_fn must tolerate that.
        estimator_spec = self._call_model_fn(
            features, None, model_fn_lib.ModeKeys.PREDICT, self.config)
        # Call to warm_start has to be after model_fn is called.
        self._maybe_warm_start(checkpoint_path)
        predictions = self._extract_keys(
            estimator_spec.predictions, predict_keys)
        # Input-pipeline hooks run first, then user hooks, then model hooks.
        all_hooks = list(input_hooks)
        all_hooks.extend(hooks)
        all_hooks.extend(list(estimator_spec.prediction_hooks or []))
        with training.MonitoredSession(
            session_creator=training.ChiefSessionCreator(
                checkpoint_filename_with_path=checkpoint_path,
                master=self._config.master,
                scaffold=estimator_spec.scaffold,
                config=self._session_config),
            hooks=all_hooks) as mon_sess:
          # Generator body: one session.run per batch until the input
          # pipeline signals exhaustion via should_stop().
          while not mon_sess.should_stop():
            preds_evaluated = mon_sess.run(predictions)
            if not yield_single_examples:
              yield preds_evaluated
            elif not isinstance(predictions, dict):
              for pred in preds_evaluated:
                yield pred
            else:
              # Decompose the batched dict into one dict per example.
              for i in range(self._extract_batch_length(preds_evaluated)):
                yield {
                    key: value[i]
                    for key, value in six.iteritems(preds_evaluated)
                }
def _assert_members_are_not_overridden(self):
"""Asserts members of `Estimator` are not overridden."""
# TPUEstimator is special cased (owned by TF).
if self.__class__.__name__ == 'TPUEstimator':
return
allowed_overrides = set([
'_create_and_assert_global_step',
'_tf_api_names', '_tf_api_names_v1', '_estimator_api_names',
'_estimator_api_names_v1', '_estimator_api_constants',
'_estimator_api_constants_v1',
])
estimator_members = set([m for m in Estimator.__dict__.keys()
if not m.startswith('__')])
subclass_members = set(self.__class__.__dict__.keys())
common_members = estimator_members & subclass_members - allowed_overrides
overridden_members = [
m for m in common_members
if Estimator.__dict__[m] != self.__class__.__dict__[m]]
if overridden_members:
raise ValueError(
'Subclasses of Estimator cannot override members of Estimator. '
'{} does override {}'.format(self.__class__, overridden_members))
def export_savedmodel(
self, export_dir_base, serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Exports inference graph as a `SavedModel` into the given dir.
For a detailed guide, see
@{$saved_model#using_savedmodel_with_estimators$Using SavedModel with
Estimators}.
This method builds a new graph by first calling the
`serving_input_receiver_fn` to obtain feature `Tensor`s, and then calling
this `Estimator`'s `model_fn` to generate the model graph based on those
features. It restores the given checkpoint (or, lacking that, the most
recent checkpoint) into this graph in a fresh session. Finally it creates
a timestamped export directory below the given `export_dir_base`, and writes
a `SavedModel` into it containing a single `tf.MetaGraphDef` saved from this
session.
The exported `MetaGraphDef` will provide one `SignatureDef` for each
element of the `export_outputs` dict returned from the `model_fn`, named
using
the same keys. One of these keys is always
`tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,
indicating which
signature will be served when a serving request does not specify one.
For each signature, the outputs are provided by the corresponding
`tf.estimator.export.ExportOutput`s, and the inputs are always the input
receivers provided by
the `serving_input_receiver_fn`.
Extra assets may be written into the `SavedModel` via the `assets_extra`
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported `SavedModel`s.
serving_input_receiver_fn: A function that takes no argument and returns a
`tf.estimator.export.ServingInputReceiver` or
`tf.estimator.export.TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported `SavedModel`, or `None` if no extra assets are
needed.
as_text: whether to write the `SavedModel` proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the `NodeDef`s. For a detailed guide, see [Stripping
Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
The string path to the exported directory.
Raises:
ValueError: if no `serving_input_receiver_fn` is provided, no
`export_outputs`
are provided, or no checkpoint can be found.
"""
# pylint: enable=line-too-long
return self._export_saved_model_for_mode(
export_dir_base,
serving_input_receiver_fn,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=strip_default_attrs,
mode=model_fn_lib.ModeKeys.PREDICT)
def _export_saved_model_for_mode(
self, export_dir_base, input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False,
mode=model_fn_lib.ModeKeys.PREDICT):
# pylint: disable=line-too-long
"""Exports a single train/eval/predict graph as a `SavedModel`.
This method is a wrapper for `_export_all_saved_models`, and wraps a raw
`input_receiver_fn` in a dictionary to pass in to that function.
See `_export_all_saved_models` for full docs.
See `tf.contrib.estimator.export_saved_model_for_mode` for the currently
exposed version of this function.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported `SavedModel`s.
input_receiver_fn: a function that takes no argument and returns the
appropriate subclass of `InputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported `SavedModel`, or `None` if no extra assets are
needed.
as_text: whether to write the `SavedModel` proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the `NodeDef`s. For a detailed guide, see [Stripping
Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
mode: `tf.estimator.ModeKeys` value indicating with mode will be exported.
Returns:
The string path to the exported directory.
Raises:
ValueError: if `input_receiver_fn` is `None`, no `export_outputs`
are provided, or no checkpoint can be found.
"""
# pylint: enable=line-too-long
if not input_receiver_fn:
raise ValueError('An input_receiver_fn must be defined.')
input_receiver_fn_map = {mode: input_receiver_fn}
return self._export_all_saved_models(
export_dir_base,
input_receiver_fn_map,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=strip_default_attrs)
def _export_all_saved_models(
self, export_dir_base, input_receiver_fn_map,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Exports a `SavedModel` containing `tf.MetaGraphDefs` for each requested mode.
See `tf.contrib.estimator.export_all_saved_models` for the currently
exposed version of this function.
For each mode passed in via the `input_receiver_fn_map`,
this method builds a new graph by calling the `input_receiver_fn` to obtain
feature and label `Tensor`s. Next, this method calls the `Estimator`'s
`model_fn` in the passed mode to generate the model graph based on
those features and labels, and restores the given checkpoint
(or, lacking that, the most recent checkpoint) into the graph.
Only one of the modes is used for saving variables to the `SavedModel`
(order of preference: @{tf.estimator.ModeKeys#TRAIN$TRAIN},
@{tf.estimator.ModeKeys#EVAL$EVAL}, then
@{tf.estimator.ModeKeys#PREDICT$PREDICT}), such that up to three
`tf.MetaGraphDefs` are saved with a single set of variables in a single
`SavedModel` directory.
For the variables and `tf.MetaGraphDefs`, a timestamped export directory
below
`export_dir_base`, and writes a `SavedModel` into it containing
the `tf.MetaGraphDef` for the given mode and its associated signatures.
For prediction, the exported `MetaGraphDef` will provide one `SignatureDef`
for each element of the `export_outputs` dict returned from the `model_fn`,
named using the same keys. One of these keys is always
`tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,
indicating which
signature will be served when a serving request does not specify one.
For each signature, the outputs are provided by the corresponding
`tf.estimator.export.ExportOutput`s, and the inputs are always the input
receivers provided by
the `serving_input_receiver_fn`.
For training and evaluation, the `train_op` is stored in an extra
collection,
and loss, metrics, and predictions are included in a `SignatureDef` for the
mode in question.
Extra assets may be written into the `SavedModel` via the `assets_extra`
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported `SavedModel`s.
input_receiver_fn_map: dict of `tf.estimator.ModeKeys` to
`input_receiver_fn` mappings, where the `input_receiver_fn` is a
function that takes no arguments and returns the appropriate subclass of
`InputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported `SavedModel`, or `None` if no extra assets are
needed.
as_text: whether to write the `SavedModel` proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the `NodeDef`s. For a detailed guide, see [Stripping
Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
A dict of `tf.estimator.ModeKeys` value to string path for each exported
directory.
Raises:
ValueError: if any `input_receiver_fn` is `None`, no `export_outputs`
are provided, or no checkpoint can be found.
"""
# pylint: enable=line-too-long
# TODO(b/65561022): Consider allowing multiple input_receiver_fns per mode.
with context.graph_mode():
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = checkpoint_management.latest_checkpoint(
self._model_dir)
if not checkpoint_path:
raise ValueError("Couldn't find trained model at %s." % self._model_dir)
export_dir = export_helpers.get_timestamped_export_dir(export_dir_base)
temp_export_dir = export_helpers.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
save_variables = True
# Note that the order in which we run here matters, as the first
# mode we pass through will be used to save the variables. We run TRAIN
# first, as that is also the mode used for checkpoints, and therefore
# we are not likely to have vars in PREDICT that are not in the checkpoint
# created by TRAIN.
if input_receiver_fn_map.get(model_fn_lib.ModeKeys.TRAIN):
self._add_meta_graph_for_mode(
builder, input_receiver_fn_map, checkpoint_path,
strip_default_attrs, save_variables,
mode=model_fn_lib.ModeKeys.TRAIN)
save_variables = False
if input_receiver_fn_map.get(model_fn_lib.ModeKeys.EVAL):
self._add_meta_graph_for_mode(
builder, input_receiver_fn_map, checkpoint_path,
strip_default_attrs, save_variables,
mode=model_fn_lib.ModeKeys.EVAL)
save_variables = False
if input_receiver_fn_map.get(model_fn_lib.ModeKeys.PREDICT):
self._add_meta_graph_for_mode(
builder, input_receiver_fn_map, checkpoint_path,
strip_default_attrs, save_variables,
mode=model_fn_lib.ModeKeys.PREDICT)
save_variables = False
if save_variables:
raise ValueError('No valid modes for exporting found. Got {}.'.format(
input_receiver_fn_map.keys()))
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
def _add_meta_graph_for_mode(self,
                             builder,
                             input_receiver_fn_map,
                             checkpoint_path,
                             strip_default_attrs,
                             save_variables=True,
                             mode=model_fn_lib.ModeKeys.PREDICT,
                             export_tags=None,
                             check_variables=True):
  # pylint: disable=line-too-long
  """Loads variables and adds them along with a `tf.MetaGraphDef` for saving.

  Args:
    builder: instance of `tf.saved_model.builder.SavedModelBuilder` that will
      be used for saving.
    input_receiver_fn_map: dict of `tf.estimator.ModeKeys` to
      `input_receiver_fn` mappings, where the `input_receiver_fn` is a
      function that takes no argument and returns the appropriate subclass of
      `InputReceiver`.
    checkpoint_path: The checkpoint path to export. If `None` (the default),
      the most recent checkpoint found within the model directory is chosen.
    strip_default_attrs: Boolean. If `True`, default-valued attributes will be
      removed from the `NodeDef`s. For a detailed guide, see [Stripping
      Default-Valued
      Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
    save_variables: bool, whether variables should be saved. If `False`, just
      the `tf.MetaGraphDef` will be saved. Note that `save_variables` should
      only be `True` for the first call to this function, and the
      `SavedModelBuilder` will raise an error if that is not the case.
    mode: `tf.estimator.ModeKeys` value indicating which mode will be
      exported.
    export_tags: The set of tags with which to save `tf.MetaGraphDef`. If
      `None`, a default set will be selected to match the passed mode.
    check_variables: bool, whether to check the checkpoint has all variables.

  Raises:
    ValueError: if `save_variables` is `True` and `check_variables` is
      `False`.
  """
  # pylint: enable=line-too-long
  if export_tags is None:
    export_tags = model_fn_lib.EXPORT_TAG_MAP[mode]
  input_receiver_fn = input_receiver_fn_map[mode]

  with ops.Graph().as_default() as g:
    self._create_and_assert_global_step(g)
    random_seed.set_random_seed(self._config.tf_random_seed)

    input_receiver = input_receiver_fn()

    # Call the model_fn and collect the export_outputs.
    estimator_spec = self._call_model_fn(
        features=input_receiver.features,
        labels=getattr(input_receiver, 'labels', None),
        mode=mode,
        config=self.config)

    export_outputs = self._get_export_outputs_for_spec(estimator_spec)

    # Build the SignatureDefs from receivers and all outputs
    signature_def_map = export_helpers.build_all_signature_defs(
        input_receiver.receiver_tensors,
        export_outputs,
        getattr(input_receiver, 'receiver_tensors_alternatives', None),
        serving_only=(mode == model_fn_lib.ModeKeys.PREDICT))

    with tf_session.Session(config=self._session_config) as session:

      if estimator_spec.scaffold.local_init_op is not None:
        local_init_op = estimator_spec.scaffold.local_init_op
      else:
        local_init_op = monitored_session.Scaffold.default_local_init_op()

      # This saver will be used both for restoring variables now,
      # and in saving out the metagraph below. This ensures that any
      # Custom Savers stored with the Scaffold are passed through to the
      # SavedModel for restore later.
      graph_saver = estimator_spec.scaffold.saver or saver.Saver(sharded=True)

      if save_variables and not check_variables:
        # Fixed error message: the original concatenated two string literals
        # without a separating space and dropped a closing backtick.
        raise ValueError('If `save_variables` is `True`, `check_variables` '
                         'must not be `False`.')
      if check_variables:
        try:
          graph_saver.restore(session, checkpoint_path)
        except errors.NotFoundError as e:
          msg = ('Could not load all requested variables from checkpoint. '
                 'Please make sure your model_fn does not expect variables '
                 'that were not saved in the checkpoint.\n\n'
                 'Encountered error with mode `{}` while restoring '
                 'checkpoint from: `{}`. Full Traceback:\n\n{}').format(
                     mode, checkpoint_path, e)
          raise ValueError(msg)

      # We add the train op explicitly for now, so that we don't have to
      # change the Builder public interface. Note that this is a no-op
      # for prediction, where train_op is None.
      builder._add_train_op(estimator_spec.train_op)  # pylint: disable=protected-access

      meta_graph_kwargs = dict(
          tags=export_tags,
          signature_def_map=signature_def_map,
          assets_collection=ops.get_collection(
              ops.GraphKeys.ASSET_FILEPATHS),
          strip_default_attrs=strip_default_attrs,
          legacy_init_op=local_init_op,
          saver=graph_saver)

      if save_variables:
        builder.add_meta_graph_and_variables(
            session, **meta_graph_kwargs)
      else:
        builder.add_meta_graph(**meta_graph_kwargs)
def _get_export_outputs_for_spec(self, estimator_spec):
  """Determines the export outputs for an `EstimatorSpec`.

  Serving (PREDICT) specs already carry explicit `export_outputs`; for
  training and evaluation graphs the tensors of interest must be wrapped in
  the appropriate `tf.estimator.export.ExportOutput` subclass.

  Args:
    estimator_spec: `tf.estimator.EstimatorSpec` object that will be exported.

  Returns:
    a dict mapping `export_output_name` to `tf.estimator.export.ExportOutput`
    object.

  Raises:
    ValueError: if an appropriate `ExportOutput` cannot be found for the
      passed `EstimatorSpec.mode`
  """
  mode = estimator_spec.mode
  if mode == model_fn_lib.ModeKeys.PREDICT:
    return estimator_spec.export_outputs

  # Dispatch table keyed by mode; raises for any unrecognized mode.
  wrapper_classes = {
      model_fn_lib.ModeKeys.TRAIN: export_output.TrainOutput,
      model_fn_lib.ModeKeys.EVAL: export_output.EvalOutput,
  }
  if mode not in wrapper_classes:
    raise ValueError(
        'Export output type not found for mode: {}'.format(mode))
  wrapped = wrapper_classes[mode](
      loss=estimator_spec.loss,
      predictions=estimator_spec.predictions,
      metrics=estimator_spec.eval_metric_ops)
  return {mode: wrapped}
def _get_features_from_input_fn(self, input_fn, mode):
  """Extracts the `features` (and hooks) from the return value of `input_fn`."""
  parsed = estimator_util.parse_input_fn_result(
      self._call_input_fn(input_fn, mode))
  features, _, input_hooks = parsed
  # Warn early if a predict input pipeline can never terminate.
  self._validate_features_in_predict_input(features)
  return features, input_hooks
def _validate_features_in_predict_input(self, result):
  """Warns when a predict input graph has no end-of-input mechanism."""
  if _has_dataset_or_queue_runner(result):
    return
  logging.warning('Input graph does not use tf.data.Dataset or contain a '
                  'QueueRunner. That means predict yields forever. '
                  'This is probably a mistake.')
def _get_features_and_labels_from_input_fn(self, input_fn, mode,
                                           distribution=None):
  """Extracts the `features` and labels from return values of `input_fn`."""
  if distribution is None:
    result = self._call_input_fn(input_fn, mode)
  else:
    # Let the distribution strategy own dataset creation so each tower gets
    # its own copy of the input pipeline.
    result = distribution.distribute_dataset(
        lambda: self._call_input_fn(input_fn, mode))
  return estimator_util.parse_input_fn_result(result)
def _extract_batch_length(self, preds_evaluated):
"""Extracts batch length of predictions."""
batch_length = None
for key, value in six.iteritems(preds_evaluated):
batch_length = batch_length or value.shape[0]
if value.shape[0] != batch_length:
raise ValueError('Batch length of predictions should be same. %s has '
'different batch length than others.' % key)
return batch_length
def _extract_keys(self, predictions, predict_keys):
  """Filters `predictions` down to the requested `predict_keys`."""
  if not predict_keys:
    return predictions
  if not isinstance(predictions, dict):
    raise ValueError(
        'predict_keys argument is not valid in case of non-dict predictions.')
  # Keep a handle on the full key set for the error message below.
  existing_keys = predictions.keys()
  filtered = {
      name: tensor
      for name, tensor in predictions.items()
      if name in predict_keys
  }
  if not filtered:
    raise ValueError('Expected to run at least one output from %s, '
                     'provided %s.' % (existing_keys, predict_keys))
  return filtered
def _create_global_step(self, graph):
  """Creates the global step tensor in graph.

  The global step tensor must be an integer type with name 'global_step' and
  be added to the collection @{tf.GraphKeys#GLOBAL_STEP$GLOBAL_STEP}.

  Args:
    graph: The graph in which to create the global step tensor.

  Returns:
    The global step `tf.Tensor`.
  """
  # Delegates to the standard training helper; subclasses may override this
  # to customize global-step creation.
  global_step_tensor = training.create_global_step(graph)
  return global_step_tensor
def _create_and_assert_global_step(self, graph):
  """Creates the global step and sanity-checks its properties.

  Args:
    graph: The graph in which to create the global step tensor.

  Returns:
    The global step `tf.Tensor`.
  """
  global_step = self._create_global_step(graph)
  # The step created above must be the one the training utilities resolve,
  # and it must be integer-typed so it can be incremented exactly.
  assert global_step == training.get_global_step()
  assert global_step.dtype.is_integer
  return global_step
def _call_input_fn(self, input_fn, mode):
  """Calls `input_fn`, forwarding only the arguments it declares.

  Args:
    input_fn: The input function.
    mode: `tf.estimator.ModeKeys`

  Returns:
    The return value of the passed `input_fn`, which should be one of:

      * A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a
        tuple `(features, labels)` with same constraints as below.
      * A tuple `(features, labels)`: Where `features` is a `Tensor` or a
        dictionary of string feature name to `Tensor` and `labels` is a
        `Tensor` or a dictionary of string label name to `Tensor`. Both
        `features` and `labels` are consumed by `model_fn`. They should
        satisfy the expectation of `model_fn` from inputs.

  Raises:
    ValueError: if `input_fn` takes invalid arguments.
  """
  accepted_args = function_utils.fn_args(input_fn)
  kwargs = {}
  # Lazily provide each optional argument only when input_fn declares it, so
  # self.params / self.config are touched only when actually needed.
  for name, provide in (('mode', lambda: mode),
                        ('params', lambda: self.params),
                        ('config', lambda: self.config)):
    if name in accepted_args:
      kwargs[name] = provide()
  # Input pipelines are pinned to the CPU so accelerators stay free for the
  # model itself.
  with ops.device('/cpu:0'):
    return input_fn(**kwargs)
def _call_model_fn(self, features, labels, mode, config):
  """Invokes the model function with the arguments it declares.

  Args:
    features: features dict.
    labels: labels dict.
    mode: `tf.estimator.ModeKeys`
    config: `tf.estimator.RunConfig`

  Returns:
    An `tf.estimator.EstimatorSpec` object.

  Raises:
    ValueError: if `model_fn` returns invalid objects.
  """
  declared_args = function_utils.fn_args(self._model_fn)
  kwargs = {}
  if 'labels' in declared_args:
    kwargs['labels'] = labels
  elif labels is not None:
    # The input_fn produced labels that the model_fn cannot accept.
    raise ValueError(
        'model_fn does not take labels, but input_fn returns labels.')
  if 'mode' in declared_args:
    kwargs['mode'] = mode
  if 'params' in declared_args:
    kwargs['params'] = self.params
  if 'config' in declared_args:
    kwargs['config'] = config

  logging.info('Calling model_fn.')
  spec = self._model_fn(features=features, **kwargs)
  logging.info('Done calling model_fn.')

  if not isinstance(spec, model_fn_lib.EstimatorSpec):
    raise ValueError('model_fn should return an EstimatorSpec.')
  return spec
def _train_model(self, input_fn, hooks, saving_listeners):
if self._train_distribution:
return self._train_model_distributed(input_fn, hooks, saving_listeners)
else:
return self._train_model_default(input_fn, hooks, saving_listeners)
def _train_model_default(self, input_fn, hooks, saving_listeners):
  """Initiate training with `input_fn`, without `DistributionStrategies`.

  Args:
    input_fn: A function that provides input data for training as minibatches.
    hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
      callbacks inside the training loop.
    saving_listeners: list of `tf.train.CheckpointSaverListener` objects. Used
      for callbacks that run immediately before or after checkpoint savings.

  Returns:
    Loss from training
  """
  worker_hooks = []
  # Build a fresh graph, applying the Estimator's device placement function.
  with ops.Graph().as_default() as g, g.device(self._device_fn):
    random_seed.set_random_seed(self._config.tf_random_seed)
    # The global step must exist before input_fn/model_fn are called.
    global_step_tensor = self._create_and_assert_global_step(g)
    # Skip creating a read variable if _create_and_assert_global_step
    # returns None (e.g. tf.contrib.estimator.SavedModelEstimator).
    if global_step_tensor is not None:
      training_util._get_or_create_global_step_read(g)  # pylint: disable=protected-access
    features, labels, input_hooks = (
        self._get_features_and_labels_from_input_fn(
            input_fn, model_fn_lib.ModeKeys.TRAIN))
    # Hooks produced by the input pipeline run alongside worker hooks.
    worker_hooks.extend(input_hooks)
    estimator_spec = self._call_model_fn(
        features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
    # NOTE(review): the global step is re-read from the graph after model_fn
    # runs — presumably in case model_fn replaced it; confirm.
    global_step_tensor = training_util.get_global_step(g)
    return self._train_with_estimator_spec(estimator_spec, worker_hooks,
                                           hooks, global_step_tensor,
                                           saving_listeners)
def _train_model_distributed(self, input_fn, hooks, saving_listeners):
  """Initiate training with `input_fn`, using `DistributionStrategies`.

  Args:
    input_fn: A function that provides input data for training as minibatches.
    hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
      callbacks inside the training loop.
    saving_listeners: list of `tf.train.CheckpointSaverListener` objects. Used
      for callbacks that run immediately before or after checkpoint savings.

  Returns:
    Loss from training
  """
  self._train_distribution.configure(self._session_config)

  # TODO(sourabhbajaj): Remove this hack once we migrate the other strategies
  # to use the new API
  is_tpu_strategy = (
      self._train_distribution.__class__.__name__ == 'TPUStrategy')

  worker_hooks = []
  with ops.Graph().as_default() as g:
    # We want to create the iterations variable outside the distribution scope
    # as that is just stored on the host and mainly used to drive the loop
    # and doesn't need to be a Mirrored/Device variable.
    steps_per_run_variable = training.get_or_create_steps_per_run_variable()
    with self._train_distribution.scope():
      random_seed.set_random_seed(self._config.tf_random_seed)

      if is_tpu_strategy:
        # TPU path: drive multiple steps per session.run via
        # run_steps_on_dataset with an initializable iterator.
        # Create the iterator for run_on_dataset function
        # TODO(sourabhbajaj): refactor this out to call a function on the
        # strategy
        dataset = self._train_distribution.distribute_dataset(
            lambda: self._call_input_fn(input_fn,  # pylint: disable=g-long-lambda
                                        model_fn_lib.ModeKeys.TRAIN))
        iterator = dataset.make_initializable_iterator()
        worker_hooks.append(
            estimator_util._DatasetInitializerHook(iterator))  # pylint: disable=protected-access

        global_step_tensor = self._create_and_assert_global_step(g)
        # we want to add to the global collection in the main thread not the
        # tower threads.
        ops.add_to_collection(
            training_util.GLOBAL_STEP_READ_KEY,
            self._train_distribution.read_var(global_step_tensor))

        # Create a step_fn from the train_op of grouped_estimator_spec
        def step_fn(ctx, features, labels):
          """A single step that is passed to run_on_dataset."""
          estimator_spec = self._train_distribution.call_for_each_tower(
              self._call_model_fn,
              features,
              labels,
              model_fn_lib.ModeKeys.TRAIN,
              self.config)
          # Expose the per-step loss and the spec itself to the outer loop.
          ctx.set_last_step_output(
              name='loss',
              output=estimator_spec.loss,
              aggregation=distribute_lib.get_loss_reduction())
          ctx.set_non_tensor_output(
              name='estimator_spec', output=estimator_spec)
          return estimator_spec.train_op

        # Create new train_op post graph rewrites
        initial_training_loss = constant_op.constant(1e7)
        ctx = self._train_distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=steps_per_run_variable,
            initial_loop_values={'loss': initial_training_loss})
        distributed_train_op = ctx.run_op
        tpu_result = ctx.last_step_outputs
        grouped_estimator_spec = ctx.non_tensor_outputs['estimator_spec']
      else:
        # Non-TPU path: call model_fn once per tower on distributed inputs.
        features, labels, input_hooks = (
            self._get_features_and_labels_from_input_fn(
                input_fn, model_fn_lib.ModeKeys.TRAIN,
                self._train_distribution))
        worker_hooks.extend(input_hooks)
        global_step_tensor = self._create_and_assert_global_step(g)
        # we want to add to the global collection in the main thread not the
        # tower threads.
        ops.add_to_collection(
            training_util.GLOBAL_STEP_READ_KEY,
            self._train_distribution.read_var(global_step_tensor))
        grouped_estimator_spec = self._train_distribution.call_for_each_tower(
            self._call_model_fn,
            features,
            labels,  # although this will be None it seems
            model_fn_lib.ModeKeys.TRAIN,
            self.config)

      # Merge the per-tower scaffolds into one for the monitored session.
      scaffold = _combine_distributed_scaffold(
          grouped_estimator_spec.scaffold, self._train_distribution)

      def get_hooks_from_the_first_device(per_device_hooks):
        # Keep a single copy of the hooks; the towers are expected to have
        # produced identical hook lists.
        hooks_list = self._train_distribution.unwrap(per_device_hooks)
        assert hooks_list
        return hooks_list[0]

      training_hooks = get_hooks_from_the_first_device(
          grouped_estimator_spec.training_hooks)
      training_chief_hooks = get_hooks_from_the_first_device(
          grouped_estimator_spec.training_chief_hooks)

      # TODO(sourabhbajaj): Merge the two code paths and clean up the code
      if is_tpu_strategy:
        loss = tpu_result['loss']
        worker_hooks.append(
            estimator_util.StrategyInitFinalizeHook(
                self._train_distribution.initialize,
                self._train_distribution.finalize))
      else:
        # Reduce the per-tower losses to a single value hosted on the CPU.
        loss = self._train_distribution.unwrap(
            self._train_distribution.reduce(
                distribute_lib.get_loss_reduction(),
                grouped_estimator_spec.loss,
                destinations='/device:CPU:0'))[0]
        distributed_train_op = grouped_estimator_spec.train_op

      # Rebuild a single EstimatorSpec from the combined pieces and hand it
      # to the regular training loop.
      estimator_spec = model_fn_lib.EstimatorSpec(
          mode=grouped_estimator_spec.mode,
          loss=loss,
          train_op=self._train_distribution.group(distributed_train_op),
          training_hooks=training_hooks,
          training_chief_hooks=training_chief_hooks,
          scaffold=scaffold)
      return self._train_with_estimator_spec(estimator_spec, worker_hooks,
                                             hooks, global_step_tensor,
                                             saving_listeners)
def _train_with_estimator_spec(self, estimator_spec, worker_hooks, hooks,
                               global_step_tensor, saving_listeners):
  """Train a model with the given Estimator Spec.

  Args:
    estimator_spec: `EstimatorSpec` produced by the model_fn in TRAIN mode.
    worker_hooks: hooks that run on every worker (input hooks, etc.).
    hooks: user-supplied `SessionRunHook`s; appended to `worker_hooks`.
    global_step_tensor: the global step tensor, logged alongside the loss.
    saving_listeners: `CheckpointSaverListener`s attached to the (single)
      checkpoint saver hook.

  Returns:
    The last loss value evaluated before the session stopped.

  Raises:
    ValueError: if `saving_listeners` is set but no CheckpointSaverHook is
      available to attach them to.
  """
  if self._warm_start_settings:
    logging.info('Warm-starting with WarmStartSettings: %s' %
                 (self._warm_start_settings,))
    warm_starting_util.warm_start(*self._warm_start_settings)
  # Check if the user created a loss summary, and add one if they didn't.
  # We assume here that the summary is called 'loss'. If it is not, we will
  # make another one with the name 'loss' to ensure it shows up in the right
  # graph in TensorBoard.
  if not any([x.op.name == 'loss'
              for x in ops.get_collection(ops.GraphKeys.SUMMARIES)]):
    summary.scalar('loss', estimator_spec.loss)
  ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
  worker_hooks.extend(hooks)
  # Stop training as soon as the loss becomes NaN.
  worker_hooks.append(
      training.NanTensorHook(estimator_spec.loss)
  )
  if self._config.log_step_count_steps is not None:
    worker_hooks.append(
        training.LoggingTensorHook(
            {
                'loss': estimator_spec.loss,
                'step': global_step_tensor
            },
            every_n_iter=self._config.log_step_count_steps)
    )
  worker_hooks.extend(estimator_spec.training_hooks)
  # Install a default sharded Saver honoring the checkpoint-retention config,
  # unless the model_fn already supplied one via scaffold or the SAVERS
  # collection.
  if not (estimator_spec.scaffold.saver or
          ops.get_collection(ops.GraphKeys.SAVERS)):
    ops.add_to_collection(
        ops.GraphKeys.SAVERS,
        training.Saver(
            sharded=True,
            max_to_keep=self._config.keep_checkpoint_max,
            keep_checkpoint_every_n_hours=(
                self._config.keep_checkpoint_every_n_hours),
            defer_build=True,
            save_relative_paths=True))
  chief_hooks = []
  all_hooks = worker_hooks + list(estimator_spec.training_chief_hooks)
  saver_hooks = [
      h for h in all_hooks if isinstance(h, training.CheckpointSaverHook)]
  # Only create a CheckpointSaverHook when checkpointing is enabled by config
  # and no hook was provided already.
  if (self._config.save_checkpoints_secs or
      self._config.save_checkpoints_steps):
    if not saver_hooks:
      chief_hooks = [
          training.CheckpointSaverHook(
              self._model_dir,
              save_secs=self._config.save_checkpoints_secs,
              save_steps=self._config.save_checkpoints_steps,
              scaffold=estimator_spec.scaffold)
      ]
      saver_hooks = [chief_hooks[0]]
  if saving_listeners:
    if not saver_hooks:
      raise ValueError(
          'There should be a CheckpointSaverHook to use saving_listeners. '
          'Please set one of the RunConfig.save_checkpoints_steps or '
          'RunConfig.save_checkpoints_secs.')
    else:
      # It is expected to have one CheckpointSaverHook. If multiple, we pick
      # up the first one to add listener.
      saver_hooks[0]._listeners.extend(saving_listeners)  # pylint: disable=protected-access
  with training.MonitoredTrainingSession(
      master=self._config.master,
      is_chief=self._config.is_chief,
      checkpoint_dir=self._model_dir,
      scaffold=estimator_spec.scaffold,
      hooks=worker_hooks,
      chief_only_hooks=(
          tuple(chief_hooks) + tuple(estimator_spec.training_chief_hooks)),
      save_checkpoint_secs=0,  # Saving is handled by a hook.
      save_summaries_steps=self._config.save_summary_steps,
      config=self._session_config,
      log_step_count_steps=self._config.log_step_count_steps) as mon_sess:
    loss = None
    # Run until a hook requests a stop; keep the most recent loss value.
    while not mon_sess.should_stop():
      _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
  return loss
def _evaluate_build_graph(self, input_fn, hooks=None, checkpoint_path=None):
  """Builds the graph and related hooks to run evaluation.

  Args:
    input_fn: A function that provides input data for evaluation.
    hooks: List of `tf.train.SessionRunHook` subclass instances, or `None`.
    checkpoint_path: Path of the checkpoint to (maybe) warm-start from, or
      `None`.

  Returns:
    A tuple `(scaffold, update_op, eval_dict, all_hooks)`.

  Raises:
    ValueError: if the model defines a metric with the reserved name
      "loss" or "global_step".
  """
  random_seed.set_random_seed(self._config.tf_random_seed)
  self._create_and_assert_global_step(ops.get_default_graph())
  features, labels, input_hooks = (
      self._get_features_and_labels_from_input_fn(
          input_fn, model_fn_lib.ModeKeys.EVAL, self._eval_distribution))
  if self._eval_distribution:
    (loss_metric, scaffold, evaluation_hooks, eval_metric_ops) = (
        self._call_model_fn_eval_distributed(features, labels, self.config))
  else:
    (loss_metric, scaffold, evaluation_hooks, eval_metric_ops) = (
        self._call_model_fn_eval(features, labels, self.config))

  global_step_tensor = training_util.get_global_step(ops.get_default_graph())
  # Call to warm_start has to be after model_fn is called.
  self._maybe_warm_start(checkpoint_path)

  if model_fn_lib.LOSS_METRIC_KEY in eval_metric_ops:
    raise ValueError(
        'Metric with name "%s" is not allowed, because Estimator ' %
        (model_fn_lib.LOSS_METRIC_KEY) +
        'already defines a default metric with the same name.')
  eval_metric_ops[model_fn_lib.LOSS_METRIC_KEY] = loss_metric

  update_op, eval_dict = _extract_metric_update_ops(eval_metric_ops,
                                                    self._eval_distribution)

  if ops.GraphKeys.GLOBAL_STEP in eval_dict:
    raise ValueError(
        'Metric with name `global_step` is not allowed, because Estimator '
        'already defines a default metric with the same name.')
  eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

  all_hooks = list(input_hooks)
  # `hooks` defaults to None; guard against `list.extend(None)` raising a
  # TypeError (matches the `evaluation_hooks or []` guard below).
  all_hooks.extend(hooks or [])
  all_hooks.extend(list(evaluation_hooks or []))
  # New local variables have been added, so update the estimator spec's
  # local init op if it was defined.
  if scaffold and scaffold.local_init_op:
    # Ensure that eval step has been created before updating local init op.
    evaluation._get_or_create_eval_step()  # pylint: disable=protected-access

    scaffold = monitored_session.Scaffold(
        local_init_op=control_flow_ops.group(
            scaffold.local_init_op,
            monitored_session.Scaffold.default_local_init_op()),
        copy_from_scaffold=scaffold
    )
  return scaffold, update_op, eval_dict, all_hooks
def _call_model_fn_eval(self, features, labels, config):
  """Runs model_fn in EVAL mode and extracts the evaluation pieces."""
  spec = self._call_model_fn(
      features, labels, model_fn_lib.ModeKeys.EVAL, config)
  # Track the loss as a streaming mean so it aggregates over eval batches.
  loss_metric = metrics_lib.mean(spec.loss)
  return (loss_metric, spec.scaffold, spec.evaluation_hooks,
          spec.eval_metric_ops)
def _call_model_fn_eval_distributed(self, features, labels, config):
  """Call model_fn in distribution mode and handle return values."""
  grouped_spec = self._eval_distribution.call_for_each_tower(
      self._call_model_fn, features, labels,
      model_fn_lib.ModeKeys.EVAL, config)
  combined_scaffold = _combine_distributed_scaffold(
      grouped_spec.scaffold, self._eval_distribution)
  # Evaluation hooks are taken from the first tower only.
  first_tower_hooks = self._eval_distribution.unwrap(
      grouped_spec.evaluation_hooks)[0]
  loss_metric = self._eval_distribution.call_for_each_tower(
      metrics_lib.mean, grouped_spec.loss)
  return (loss_metric, combined_scaffold, first_tower_hooks,
          grouped_spec.eval_metric_ops)
def _evaluate_run(self, checkpoint_path, scaffold, update_op, eval_dict,
                  all_hooks, output_dir):
  """Runs a single evaluation pass and writes the results as summaries."""
  results = evaluation._evaluate_once(  # pylint: disable=protected-access
      checkpoint_path=checkpoint_path,
      master=self._config.evaluation_master,
      scaffold=scaffold,
      eval_ops=update_op,
      final_ops=eval_dict,
      hooks=all_hooks,
      config=self._session_config)

  global_step = results[ops.GraphKeys.GLOBAL_STEP]
  # Persist metric values (and, when known, the originating checkpoint path)
  # as summaries under output_dir.
  _write_dict_to_summary(
      output_dir=output_dir,
      dictionary=results,
      current_global_step=global_step)
  if checkpoint_path:
    _write_checkpoint_path_to_summary(
        output_dir=output_dir,
        checkpoint_path=checkpoint_path,
        current_global_step=global_step)
  return results
def _maybe_warm_start(self, checkpoint_path):
if not checkpoint_path and self._warm_start_settings:
logging.info('Warm-starting with WarmStartSettings: %s' %
(self._warm_start_settings,))
warm_starting_util.warm_start(*self._warm_start_settings)
def maybe_overwrite_model_dir_and_session_config(config, model_dir):
  """Overwrite estimator config by `model_dir` and `session_config` if needed.

  Args:
    config: Original estimator config.
    model_dir: Estimator model checkpoint directory.

  Returns:
    Overwritten estimator config.

  Raises:
    ValueError: Model directory inconsistent between `model_dir` and `config`.
  """
  if config is None:
    config = run_config.RunConfig()
    logging.info('Using default config.')
  if not isinstance(config, run_config.RunConfig):
    raise ValueError(
        'config must be an instance of `RunConfig`, but provided %s.' % config)

  if config.session_config is None:
    # Fill in a default session config when the user supplied none.
    config = run_config.RunConfig.replace(
        config, session_config=run_config.get_default_session_config())

  normalized_model_dir = compat_internal.path_to_str(model_dir)
  config_model_dir = getattr(config, 'model_dir', None)
  if normalized_model_dir is not None:
    if config_model_dir is not None and config_model_dir != normalized_model_dir:
      raise ValueError(
          "`model_dir` are set both in constructor and `RunConfig`, but with "
          "different values. In constructor: '{}', in `RunConfig`: "
          "'{}' ".format(normalized_model_dir, config_model_dir))
    if normalized_model_dir:
      config = run_config.RunConfig.replace(
          config, model_dir=normalized_model_dir)
  elif config_model_dir is None:
    # Neither source specified a model dir; fall back to a temp folder.
    temp_dir = tempfile.mkdtemp()
    logging.warning('Using temporary folder as model directory: %s', temp_dir)
    config = run_config.RunConfig.replace(config, model_dir=temp_dir)
  return config
def create_per_tower_ready_op(scaffold):
  """Create a `tf.train.Scaffold.ready_op` inside a tower."""
  if scaffold.ready_op:
    return scaffold.ready_op

  def _default_ready_op():
    # Report uninitialized variables and resources in one concatenated tensor.
    return array_ops.concat([
        variables.report_uninitialized_variables(),
        resources.report_uninitialized_resources()
    ], 0)

  return monitored_session.Scaffold.get_or_default(
      'ready_op', ops.GraphKeys.READY_OP, _default_ready_op)
def create_per_tower_ready_for_local_init_op(scaffold):
  """Create a `tf.train.Scaffold.ready_for_local_init_op` inside a tower."""
  if scaffold.ready_for_local_init_op:
    return scaffold.ready_for_local_init_op

  def _default_op():
    # Local init may proceed once every global variable is initialized.
    return variables.report_uninitialized_variables(
        variables.global_variables())

  return monitored_session.Scaffold.get_or_default(
      'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,
      _default_op)
def _combine_distributed_scaffold(grouped_scaffold, distribution):
  """Combines scaffold(s) returned from `distribution.call_for_each_tower`.

  Args:
    grouped_scaffold: the per-tower scaffold(s) as returned by
      `distribution.call_for_each_tower`.
    distribution: the `DistributionStrategy` used to unwrap/group per-tower
      values.

  Returns:
    A single `tf.train.Scaffold` merging the per-tower pieces.
  """
  # TODO(anjalisridhar): Figure out how to resolve the following scaffold
  # parameters: init_feed_dict, init_fn.
  scaffold_list = distribution.unwrap(grouped_scaffold)

  def _group_or_none(attr_name):
    """Groups the non-None per-tower values of `attr_name`, or returns None."""
    values = [getattr(s, attr_name) for s in scaffold_list
              if getattr(s, attr_name) is not None]
    return distribution.group(values) if values else None

  init_feed_dict = _group_or_none('init_feed_dict')
  init_fn = _group_or_none('init_fn')
  init_op = _group_or_none('init_op')
  local_init_op = _group_or_none('local_init_op')
  summary_op = _group_or_none('summary_op')

  def _unwrap_and_concat(value):
    value = nest.flatten(distribution.unwrap(value))
    if len(value) != 1:
      # `concat` requires an explicit axis; the previous call omitted it and
      # would fail on multi-tower values. Axis 0 matches the 1-D report
      # tensors built in create_per_tower_ready_op.
      return array_ops.concat(value, 0)
    return value[0]

  ready_op = distribution.call_for_each_tower(
      create_per_tower_ready_op, grouped_scaffold)
  if ready_op is not None:
    ready_op = _unwrap_and_concat(ready_op)

  ready_for_local_init_op = distribution.call_for_each_tower(
      create_per_tower_ready_for_local_init_op, grouped_scaffold)
  if ready_for_local_init_op is not None:
    ready_for_local_init_op = _unwrap_and_concat(ready_for_local_init_op)

  return monitored_session.Scaffold(
      init_op=init_op,
      ready_op=ready_op,
      ready_for_local_init_op=ready_for_local_init_op,
      local_init_op=local_init_op,
      summary_op=summary_op,
      init_feed_dict=init_feed_dict,
      init_fn=init_fn)
def _check_checkpoint_available(model_dir):
  """Raises `ValueError` when `model_dir` contains no checkpoint."""
  if checkpoint_management.latest_checkpoint(model_dir):
    return
  raise ValueError(
      'Could not find trained model in model_dir: {}.'.format(model_dir))
def _check_hooks_type(hooks):
"""Returns hooks if all are `SessionRunHook`, raises TypeError otherwise."""
hooks = list(hooks or [])
for h in hooks:
if not isinstance(h, training.SessionRunHook):
raise TypeError('Hooks must be a SessionRunHook, given: {}'.format(h))
return hooks
def _check_listeners_type(saving_listeners):
"""Check listeners type."""
listeners = list(saving_listeners or [])
for l in listeners:
if not isinstance(l, training.CheckpointSaverListener):
raise TypeError(
'saving_listeners must be a list of CheckpointSaverListener, '
'given: {}'.format(l))
return listeners
def _get_replica_device_setter(config):
  """Creates a replica device setter if required as a default `device_fn`.

  `Estimator` uses `tf.train.ReplicaDeviceSetter` as a default device placer.
  It sets the distributed related arguments such as number of `ps_replicas`
  based on the given `config`.

  Args:
    config: A `tf.estimator.RunConfig` instance.

  Returns:
    A replica device setter, or `None` when no parameter servers are
    configured.
  """
  # Without parameter servers there is nothing to place remotely.
  if config.num_ps_replicas <= 0:
    return None
  if config.task_type:
    worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
  else:
    worker_device = '/job:worker'
  return training.replica_device_setter(
      ps_tasks=config.num_ps_replicas,
      worker_device=worker_device,
      merge_devices=True,
      ps_ops=list(device_setter.STANDARD_PS_OPS),
      cluster=config.cluster_spec)
def _verify_model_fn_args(model_fn, params):
  """Verifies `model_fn` arguments against the Estimator contract."""
  fn_args = set(function_utils.fn_args(model_fn))
  if 'features' not in fn_args:
    raise ValueError('model_fn (%s) must include features argument.' % model_fn)
  has_params_arg = 'params' in fn_args
  if params is not None and not has_params_arg:
    raise ValueError('model_fn (%s) does not include params argument, '
                     'but params (%s) is passed to Estimator.' % (model_fn,
                                                                  params))
  if params is None and has_params_arg:
    # Only a warning: an unused params argument is harmless.
    logging.warning('Estimator\'s model_fn (%s) includes params '
                    'argument, but params are not passed to Estimator.',
                    model_fn)
  invalid_args = list(fn_args - _VALID_MODEL_FN_ARGS)
  if invalid_args:
    raise ValueError('model_fn (%s) has following not expected args: %s' %
                     (model_fn, invalid_args))
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
  """Returns the global step recorded in the latest checkpoint, or 0.

  Args:
    checkpoint_dir: Directory that may contain checkpoint files.

  Returns:
    The value of the `GLOBAL_STEP` tensor from the latest checkpoint in
    `checkpoint_dir`, or 0 if no checkpoint exists or it cannot be read.
  """
  try:
    checkpoint_reader = training.NewCheckpointReader(
        training.latest_checkpoint(checkpoint_dir))
    return checkpoint_reader.get_tensor(ops.GraphKeys.GLOBAL_STEP)
  except Exception:  # pylint: disable=broad-except
    # A missing or unreadable checkpoint is expected (e.g. first run) and is
    # treated as step 0.  Unlike the previous bare `except:`, this still lets
    # KeyboardInterrupt/SystemExit propagate.
    return 0
def _extract_metric_update_ops(eval_dict, distribution=None):
  """Separate update operations from metric value operations.

  Args:
    eval_dict: Dict mapping metric name to a `(value_op, update_op)` pair.
    distribution: Optional distribution strategy used to group update ops.

  Returns:
    A `(grouped_update_op_or_None, value_ops_dict)` tuple.
  """
  update_ops = []
  value_ops = {}
  # Sort metrics lexicographically so graph is identical every time.
  for name, metric_ops in sorted(six.iteritems(eval_dict)):
    value_ops[name] = metric_ops[0]
    update = metric_ops[1]
    if distribution:
      update = distribution.group(update)
    update_ops.append(update)
  combined_update_op = (
      control_flow_ops.group(*update_ops) if update_ops else None)
  return combined_update_op, value_ops
def _dict_to_str(dictionary):
  """Get a `str` representation of a `dict`.

  Keys are emitted in sorted order as `'key = value'` pairs; entries whose
  value is a byte string are omitted.

  Args:
    dictionary: The `dict` to be represented as `str`.

  Returns:
    A `str` representing the `dictionary`.
  """
  parts = ['%s = %s' % (k, v)
           for k, v in sorted(six.iteritems(dictionary))
           if not isinstance(v, six.binary_type)]
  return ', '.join(parts)
def _write_dict_to_summary(output_dir,
                           dictionary,
                           current_global_step):
  """Writes a `dict` into summary file in given output directory.

  All entries are merged into one `Summary` proto: numeric scalars become
  `simple_value`s, byte strings are assumed to be serialized `Summary` protos
  and are merged in with their tags namespaced as '<key>/<index>', and
  `np.ndarray`s become tensor values.  `None` values, the 'global_step' key
  and unsupported types are skipped.

  Args:
    output_dir: `str`, directory to write the summary file in.
    dictionary: the `dict` to be written to summary file.
    current_global_step: `int`, the current global step.
  """
  logging.info('Saving dict for global step %d: %s', current_global_step,
               _dict_to_str(dictionary))
  summary_writer = writer_cache.FileWriterCache.get(output_dir)
  summary_proto = summary_pb2.Summary()
  for key in dictionary:
    if dictionary[key] is None:
      continue
    if key == 'global_step':
      # The step is supplied separately to add_summary() below.
      continue
    if (isinstance(dictionary[key], np.float32) or
        isinstance(dictionary[key], float)):
      summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
    elif (isinstance(dictionary[key], np.int64) or
          isinstance(dictionary[key], np.int32) or
          isinstance(dictionary[key], int)):
      summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
    elif isinstance(dictionary[key], six.binary_type):
      try:
        summ = summary_pb2.Summary.FromString(dictionary[key])
        # Namespace the merged tags under this key to avoid collisions.
        for i, _ in enumerate(summ.value):
          summ.value[i].tag = '%s/%d' % (key, i)
        summary_proto.value.extend(summ.value)
      except message.DecodeError:
        logging.warn('Skipping summary for %s, cannot parse string to Summary.',
                     key)
        continue
    elif isinstance(dictionary[key], np.ndarray):
      value = summary_proto.value.add()
      value.tag = key
      value.node_name = key
      tensor_proto = tensor_util.make_tensor_proto(dictionary[key])
      value.tensor.CopyFrom(tensor_proto)
      # pylint: disable=line-too-long
      logging.info(
          'Summary for np.ndarray is not visible in Tensorboard by default. '
          'Consider using a Tensorboard plugin for visualization (see '
          'https://github.com/tensorflow/tensorboard-plugin-example/blob/master/README.md'
          ' for more information).')
      # pylint: enable=line-too-long
    else:
      logging.warn(
          'Skipping summary for %s, must be a float, np.float32, np.int64, '
          'np.int32 or int or np.ndarray or a serialized string of Summary.',
          key)
  summary_writer.add_summary(summary_proto, current_global_step)
  summary_writer.flush()
def _write_checkpoint_path_to_summary(output_dir, checkpoint_path,
                                      current_global_step):
  """Records `checkpoint_path` as a string-tensor summary in `output_dir`.

  Args:
    output_dir: `str`, directory to write the summary file in.
    checkpoint_path: `str`, checkpoint file path to be written to summary file.
    current_global_step: `int`, the current global step.
  """
  tag = 'checkpoint_path'
  logging.info('Saving \'%s\' summary for global step %d: %s',
               tag, current_global_step, checkpoint_path)
  proto = summary_pb2.Summary()
  proto.value.add(
      tag=tag,
      tensor=tensor_util.make_tensor_proto(
          checkpoint_path, dtype=dtypes.string))
  writer = writer_cache.FileWriterCache.get(output_dir)
  writer.add_summary(proto, current_global_step)
  writer.flush()
def _has_dataset_or_queue_runner(maybe_tensor):
  """Returns a truthy value if a `Dataset` or `QueueRunner` has been used.

  Note: the queue branch returns the (possibly empty) QUEUE_RUNNERS
  collection rather than a bool; callers should rely on truthiness only.

  Args:
    maybe_tensor: An arbitrarily nested structure that may contain `Tensor`s.
  """
  # Check TF dataset first. Here, we use a simple algorithm to check the top
  # level Tensors only, which should be sufficient for most users.
  tensors = [x for x in nest.flatten(maybe_tensor) if isinstance(x, ops.Tensor)]
  # Generator expression avoids materializing a throwaway list for any().
  if any(t.op.type == 'IteratorGetNext' for t in tensors):
    return True
  # Now, check queue.
  return ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS)
# Re-export `VocabInfo` from warm_starting_util and register it under the
# public `tf.estimator.VocabInfo` export name.
VocabInfo = warm_starting_util.VocabInfo  # pylint: disable=invalid-name
estimator_export('estimator.VocabInfo')(VocabInfo)
@estimator_export('estimator.WarmStartSettings')
class WarmStartSettings(
    collections.namedtuple('WarmStartSettings', [
        'ckpt_to_initialize_from',
        'vars_to_warm_start',
        'var_name_to_vocab_info',
        'var_name_to_prev_var_name',
    ])):
  """Settings for warm-starting in `tf.estimator.Estimators`.

  Example Use with canned `tf.estimator.DNNEstimator`:

  ```
  emb_vocab_file = tf.feature_column.embedding_column(
      tf.feature_column.categorical_column_with_vocabulary_file(
          "sc_vocab_file", "new_vocab.txt", vocab_size=100),
      dimension=8)
  emb_vocab_list = tf.feature_column.embedding_column(
      tf.feature_column.categorical_column_with_vocabulary_list(
          "sc_vocab_list", vocabulary_list=["a", "b"]),
      dimension=8)
  estimator = tf.estimator.DNNClassifier(
      hidden_units=[128, 64], feature_columns=[emb_vocab_file, emb_vocab_list],
      warm_start_from=ws)
  ```

  where `ws` could be defined as:

  Warm-start all weights in the model (input layer and hidden weights).
  Either the directory or a specific checkpoint can be provided (in the case
  of the former, the latest checkpoint will be used):

  ```
  ws = WarmStartSettings(ckpt_to_initialize_from="/tmp")
  ws = WarmStartSettings(ckpt_to_initialize_from="/tmp/model-1000")
  ```

  Warm-start only the embeddings (input layer):

  ```
  ws = WarmStartSettings(ckpt_to_initialize_from="/tmp",
                         vars_to_warm_start=".*input_layer.*")
  ```

  Warm-start all weights but the embedding parameters corresponding to
  `sc_vocab_file` have a different vocab from the one used in the current
  model:

  ```
  vocab_info = tf.estimator.VocabInfo(
      new_vocab=sc_vocab_file.vocabulary_file,
      new_vocab_size=sc_vocab_file.vocabulary_size,
      num_oov_buckets=sc_vocab_file.num_oov_buckets,
      old_vocab="old_vocab.txt"
  )
  ws = WarmStartSettings(
      ckpt_to_initialize_from="/tmp",
      var_name_to_vocab_info={
          "input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
      })
  ```

  Warm-start only `sc_vocab_file` embeddings (and no other variables), which
  have a different vocab from the one used in the current model:

  ```
  vocab_info = tf.estimator.VocabInfo(
      new_vocab=sc_vocab_file.vocabulary_file,
      new_vocab_size=sc_vocab_file.vocabulary_size,
      num_oov_buckets=sc_vocab_file.num_oov_buckets,
      old_vocab="old_vocab.txt"
  )
  ws = WarmStartSettings(
      ckpt_to_initialize_from="/tmp",
      vars_to_warm_start=None,
      var_name_to_vocab_info={
          "input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
      })
  ```

  Warm-start all weights but the parameters corresponding to `sc_vocab_file`
  have a different vocab from the one used in current checkpoint, and only
  100 of those entries were used:

  ```
  vocab_info = tf.estimator.VocabInfo(
      new_vocab=sc_vocab_file.vocabulary_file,
      new_vocab_size=sc_vocab_file.vocabulary_size,
      num_oov_buckets=sc_vocab_file.num_oov_buckets,
      old_vocab="old_vocab.txt",
      old_vocab_size=100
  )
  ws = WarmStartSettings(
      ckpt_to_initialize_from="/tmp",
      var_name_to_vocab_info={
          "input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
      })
  ```

  Warm-start all weights but the parameters corresponding to `sc_vocab_file`
  have a different vocab from the one used in current checkpoint and the
  parameters corresponding to `sc_vocab_list` have a different name from the
  current checkpoint:

  ```
  vocab_info = tf.estimator.VocabInfo(
      new_vocab=sc_vocab_file.vocabulary_file,
      new_vocab_size=sc_vocab_file.vocabulary_size,
      num_oov_buckets=sc_vocab_file.num_oov_buckets,
      old_vocab="old_vocab.txt",
      old_vocab_size=100
  )
  ws = WarmStartSettings(
      ckpt_to_initialize_from="/tmp",
      var_name_to_vocab_info={
          "input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
      },
      var_name_to_prev_var_name={
          "input_layer/sc_vocab_list_embedding/embedding_weights":
              "old_tensor_name"
      })
  ```

  Attributes:
    ckpt_to_initialize_from: [Required] A string specifying the directory with
      checkpoint file(s) or path to checkpoint from which to warm-start the
      model parameters.
    vars_to_warm_start: [Optional] One of the following: - A regular expression
      (string) that captures which variables to warm-start (see
      `tf.get_collection`). This expression will only consider variables in the
      `TRAINABLE_VARIABLES` collection. - A list of Variables to warm-start. - A
      list of strings, each representing a full variable name to warm-start. -
      `None`, in which case only variables specified in `var_name_to_vocab_info`
      will be warm-started. Defaults to `'.*'`, which warm-starts all variables
      in the `TRAINABLE_VARIABLES` collection.  Note that this excludes
      variables such as accumulators and moving statistics from batch norm.
    var_name_to_vocab_info: [Optional] Dict of variable names (strings) to
      `tf.estimator.VocabInfo`. The variable names should be "full" variables,
      not the names of the partitions.  If not explicitly provided, the variable
      is assumed to have no vocabulary.
    var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to
      name of the previously-trained variable in `ckpt_to_initialize_from`. If
      not explicitly provided, the name of the variable is assumed to be same
      between previous checkpoint and current model.
  """

  def __new__(cls,
              ckpt_to_initialize_from,
              vars_to_warm_start='.*',
              var_name_to_vocab_info=None,
              var_name_to_prev_var_name=None):
    """Validates the checkpoint path and normalizes optional dicts to `{}`."""
    if not ckpt_to_initialize_from:
      raise ValueError(
          '`ckpt_to_initialize_from` MUST be set in WarmStartSettings')
    # Store empty dicts (not None) so downstream code can iterate them freely.
    return super(WarmStartSettings, cls).__new__(
        cls,
        ckpt_to_initialize_from,
        vars_to_warm_start,
        var_name_to_vocab_info or {},
        var_name_to_prev_var_name or {},
    )
def _get_saved_model_ckpt(saved_model_dir):
  """Return path to variables checkpoint in a `SavedModel` directory.

  Raises:
    ValueError: If the directory lacks a 'variables/variables.index' file.
  """
  variables_index = os.path.join(
      saved_model_utils.get_variables_dir(saved_model_dir),
      compat.as_text('variables.index'))
  if not gfile.Exists(variables_index):
    raise ValueError('Directory provided has an invalid SavedModel format: %s'
                     % saved_model_dir)
  return saved_model_utils.get_variables_path(saved_model_dir)
def _get_default_warm_start_settings(warm_start_from):
  """Returns default `tf.estimator.WarmStartSettings`.

  Args:
    warm_start_from: Either a string representing the filepath of a checkpoint
      or `SavedModel` to initialize from, or an instance of
      `tf.estimator.WarmStartSettings`.

  Returns:
    Either None or an instance of `WarmStartSettings`.

  Raises:
    ValueError: If `warm_start_from` is not `None` but is neither a string nor
      an instance of `WarmStartSettings`.
  """
  if warm_start_from is None:
    return None
  if isinstance(warm_start_from, WarmStartSettings):
    return warm_start_from
  if isinstance(warm_start_from, (six.string_types, six.binary_type)):
    # A SavedModel export is recognized by the presence of
    # 'variables/variables.index' under the export directory; in that case,
    # warm-start from its variables checkpoint instead of the raw path.
    variables_index = os.path.join(
        saved_model_utils.get_variables_dir(warm_start_from),
        compat.as_text('variables.index'))
    if gfile.Exists(variables_index):
      logging.info('Warm-starting from a SavedModel')
      return WarmStartSettings(
          ckpt_to_initialize_from=saved_model_utils.get_variables_path(
              warm_start_from))
    return WarmStartSettings(ckpt_to_initialize_from=warm_start_from)
  raise ValueError('warm_start_from must be a string or a WarmStartSettings, '
                   'instead got {}'.format(type(warm_start_from)))
| ZhangXinNan/tensorflow | tensorflow/python/estimator/estimator.py | Python | apache-2.0 | 87,999 |
#
# (C)opyright 2015 Signal Processing Devices Sweden AB
#
# This script showcases in Python
# - How to connect to ADQ devices in Python
# - Upload of waveforms to the SDR14
# - Using a playlist on the SDR14
# - How to setup an acquisition of data
# - How to read data by GetData API in Python
# - How to plot data in Python
#
# Note: The example is intended to use the SDR14 device connected in loopback mode (i.e. connect DAC output to ADC input)
import numpy as np
import ctypes as ct
import matplotlib.pyplot as plt
def set_playlist( adq_cu, adq_num, dac_id, tcstr ):
    """Build a predefined playlist table and upload it to the AWG.

    Args:
        adq_cu: ADQ control unit handle.
        adq_num: Device number within the control unit.
        dac_id: Target DAC/AWG channel id.
        tcstr: Name of the predefined playlist; only 'basic1' is defined.

    Returns:
        The playlist table as a dict of ctypes arrays.

    Raises:
        ValueError: If `tcstr` is not a known playlist name.  (Previously an
            unknown name fell through to an obscure KeyError on tc['ns'].)
    """
    if tcstr != 'basic1':
        raise ValueError("Unknown playlist name: {}".format(tcstr))
    tc = {}
    ns = 2  # Number of items
    tc["ns"] = ns
    # Two entries that trigger-chain into each other forever:
    # item 1 plays segment 1 four times, then item 2 plays segment 2
    # three times, then back to item 1.
    tc["index"] = (ct.c_uint32 * ns)( 1, 2)
    tc["segment"] = (ct.c_uint32 * ns)( 1, 2)
    tc["next"] = (ct.c_uint32 * ns)( 2, 1)
    tc["wrap"] = (ct.c_uint32 * ns)( 4, 3)
    tc["ulsign"] = (ct.c_uint32 * ns)( 0, 0)
    tc["trigtype"] = (ct.c_uint32 * ns)( 1, 1)
    tc["triglength"] = (ct.c_uint32 * ns)( 50, 50)
    tc["trigpolarity"] = (ct.c_uint32 * ns)( 0, 0)
    tc["trigsample"] = (ct.c_uint32 * ns)( 1, 1)
    tc["writemask"] = (ct.c_uint32 * ns)( 15, 15)
    # Transfer playlist to device
    ADQAPI.ADQ_AWGWritePlaylist( adq_cu, adq_num, dac_id, tc['ns'], ct.byref(tc['index']), ct.byref(tc['writemask']), ct.byref(tc['segment']), ct.byref(tc['wrap']), ct.byref(tc['next']), ct.byref(tc['trigtype']), ct.byref(tc['triglength']), ct.byref(tc['trigpolarity']), ct.byref(tc['trigsample']), ct.byref(tc['ulsign']) )
    # Select the Playlist mode
    ADQAPI.ADQ_AWGPlaylistMode( adq_cu, adq_num, dac_id, 1)
    return tc
def lessen_to_14bits( databuf ):
    """Mask every sample in `databuf` down to the DAC's 14-bit range, in place.

    Generalized from the original hard-coded 4096-iteration loop so it works
    for a buffer of any length (the original raised IndexError for shorter
    buffers and silently ignored samples past 4096).

    Args:
        databuf: Mutable sequence of integer samples (e.g. a numpy int16 view
            of a ctypes buffer, or a plain list).

    Returns:
        The same buffer object, for convenient call chaining.
    """
    for i in range(len(databuf)):
        databuf[i] = databuf[i] & 0x3FFF
    return databuf
def define_and_upload_segments( adq_cu, adq_num, dac_id ):
    """Create three 4096-sample waveform segments and upload them to the AWG.

    Segment 1 is a sawtooth, segment 2 a positive two-step pulse, segment 3 a
    negative single-level pulse.  Each buffer is masked to the DAC's 14-bit
    range before upload.
    """
    # Setup target buffers for upload of data
    number_of_data_segments = 3
    data_length = 4096
    data_buffers=(ct.POINTER(ct.c_int16*data_length)*number_of_data_segments)()
    # NOTE(review): this np.zeros array is never used -- `databuf` is rebound
    # to np.frombuffer views below, so this allocation looks like dead code.
    databuf = np.zeros((number_of_data_segments,data_length))
    for bufp in data_buffers:
        bufp.contents = (ct.c_int16*data_length)()
    # Re-arrange data in numpy arrays; each frombuffer view aliases the
    # ctypes storage, so writes below land directly in the upload buffers.
    databuf = np.frombuffer(data_buffers[0].contents,dtype=np.int16)
    #Create sawtooth
    for x in range(0, 1024):
        databuf[x] = x
        databuf[x+1024] = 1024 - x
        databuf[x+2048] = -x
        databuf[x+2048+1024] = -1024 + x
    databuf = lessen_to_14bits(databuf)
    databuf = np.frombuffer(data_buffers[1].contents,dtype=np.int16)
    #Create positive pulse
    for x in range(0, 128):
        databuf[x] = 1024+x
        databuf[x+128] = 1300+x
        databuf[x+256] = 1300+128-x
    for x in range(384, 4096):
        databuf[x] = 0
    databuf = lessen_to_14bits(databuf)
    #Create negative pulse (one level)
    databuf = np.frombuffer(data_buffers[2].contents,dtype=np.int16)
    for x in range(0, 256):
        databuf[x] = -512
    for x in range(256, 4096):
        databuf[x] = 0
    databuf = lessen_to_14bits(databuf)
    # Per-segment metadata arrays: lengths, 1-based segment ids, lap counts.
    length_np = (ct.c_uint32 * number_of_data_segments)(data_length, data_length, data_length)
    segId_np = (ct.c_uint32 * number_of_data_segments)(1, 2, 3)
    NofLaps_np = (ct.c_uint32 * number_of_data_segments)(3, 3, 3)
    for idx,bufp in enumerate(data_buffers):
        ADQAPI.ADQ_AWGSegmentMalloc( adq_cu, adq_num, dac_id, idx+1, length_np[idx], 0)
    ADQAPI.ADQ_AWGWriteSegments( adq_cu, adq_num, dac_id, number_of_data_segments, ct.byref(segId_np), ct.byref(NofLaps_np), ct.byref(length_np), data_buffers )
    # Note: In playlist mode, all used segments must be in the enabled range, otherwise playback will stop
    ADQAPI.ADQ_AWGEnableSegments( adq_cu, adq_num, dac_id, number_of_data_segments )
    return
# Load the vendor ADQAPI shared library via ctypes.
# For Python under Linux (uncomment in Linux)
#ADQAPI = ct.cdll.LoadLibrary("libadq.so")
# For Python under Windows
ADQAPI = ct.cdll.LoadLibrary("ADQAPI.dll")
ADQAPI.ADQAPI_GetRevision()
# Manually set return type from some ADQAPI functions (ctypes defaults to
# c_int, which truncates pointers on 64-bit systems).
ADQAPI.CreateADQControlUnit.restype = ct.c_void_p
ADQAPI.ADQ_GetRevision.restype = ct.c_void_p
ADQAPI.ADQ_GetPtrStream.restype = ct.POINTER(ct.c_int16)
ADQAPI.ADQControlUnit_FindDevices.argtypes = [ct.c_void_p]
# Create ADQControlUnit
adq_cu = ct.c_void_p(ADQAPI.CreateADQControlUnit())
# Trace level 3, log files written to the current directory.
ADQAPI.ADQControlUnit_EnableErrorTrace(adq_cu, 3, '.')
# This example always talks to device #1, DAC/AWG channel 1.
adq_num = 1
dac_id = 1
bypass_analog = 1
# Convenience function
def adq_status(status):
    """Translate an ADQAPI integer return code into 'OK' or 'FAILURE'."""
    return 'FAILURE' if status == 0 else 'OK'
# Find ADQ devices
ADQAPI.ADQControlUnit_FindDevices(adq_cu)
n_of_ADQ = ADQAPI.ADQControlUnit_NofADQ(adq_cu)
print('Number of ADQ found: {}'.format(n_of_ADQ))
if n_of_ADQ > 0:
    # Get revision info from ADQ
    rev = ADQAPI.ADQ_GetRevision(adq_cu, adq_num)
    revision = ct.cast(rev,ct.POINTER(ct.c_int))
    print('\nConnected to ADQ #1')
    # Print revision information
    print('FPGA Revision: {}'.format(revision[0]))
    if (revision[1]):
        print('Local copy')
    else :
        print('SVN Managed')
    if (revision[2]):
        print('Mixed Revision')
    else :
        print('SVN Updated')
    print('')
    # Choose whether to bypass_analog (vendor register 10240, bit 1).
    ADQAPI.ADQ_WriteRegister(adq_cu, adq_num, 10240, 0, 2*bypass_analog);
    # Upload data to SDR14 and arm the AWG playlist.
    define_and_upload_segments(adq_cu, adq_num, dac_id)
    set_playlist(adq_cu, adq_num, dac_id, 'basic1')
    ADQAPI.ADQ_AWGAutoRearm(adq_cu, adq_num, dac_id, 1)
    ADQAPI.ADQ_AWGContinuous(adq_cu, adq_num, dac_id, 0)
    ADQAPI.ADQ_AWGSetTriggerEnable(adq_cu, adq_num, 31)
    ADQAPI.ADQ_AWGArm(adq_cu, adq_num, dac_id)
    #ADQAPI.ADQ_AWGTrig(adq_cu, adq_num, dac_id)
    # Set clock source
    ADQ_CLOCK_INT_INTREF = 0
    ADQAPI.ADQ_SetClockSource(adq_cu, adq_num, ADQ_CLOCK_INT_INTREF);
    # Set trig mode; the constants below mirror the ADQAPI trigger modes.
    SW_TRIG = 1
    EXT_TRIG_1 = 2
    EXT_TRIG_2 = 7
    EXT_TRIG_3 = 8
    LVL_TRIG = 3
    INT_TRIG = 4
    LVL_FALLING = 0
    LVL_RISING = 1
    trigger = SW_TRIG
    success = ADQAPI.ADQ_SetTriggerMode(adq_cu, adq_num, trigger)
    if (success == 0):
        print('ADQ_SetTriggerMode failed.')
    number_of_records = 1
    samples_per_record = 65536
    # Start acquisition
    ADQAPI.ADQ_MultiRecordSetup(adq_cu, adq_num,
                                number_of_records,
                                samples_per_record)
    ADQAPI.ADQ_DisarmTrigger(adq_cu, adq_num)
    ADQAPI.ADQ_ArmTrigger(adq_cu, adq_num)
    # In SW-trigger mode, keep issuing software triggers until the device
    # reports that all records have been acquired.
    while(ADQAPI.ADQ_GetAcquiredAll(adq_cu,adq_num) == 0):
        if (trigger == SW_TRIG):
            ADQAPI.ADQ_SWTrig(adq_cu, adq_num)
        print('Waiting for trigger')
    # Setup target buffers for data
    max_number_of_channels = 2
    target_buffers=(ct.POINTER(ct.c_int16*samples_per_record*number_of_records)*max_number_of_channels)()
    for bufp in target_buffers:
        bufp.contents = (ct.c_int16*samples_per_record*number_of_records)()
    # Get data from ADQ (both channels, full records).
    ADQ_TRANSFER_MODE_NORMAL = 0
    ADQ_CHANNELS_MASK = 0x3
    status = ADQAPI.ADQ_GetData(adq_cu, adq_num, target_buffers,
                                samples_per_record*number_of_records, 2,
                                0, number_of_records, ADQ_CHANNELS_MASK,
                                0, samples_per_record, ADQ_TRANSFER_MODE_NORMAL);
    print('ADQ_GetData returned {}'.format(adq_status(status)))
    # Re-arrange data in numpy arrays (views over the ctypes buffers).
    data_16bit_ch0 = np.frombuffer(target_buffers[0].contents[0],dtype=np.int16)
    data_16bit_ch1 = np.frombuffer(target_buffers[1].contents[0],dtype=np.int16)
    # Plot data
    if True:
        plt.figure(1)
        plt.clf()
        plt.plot(data_16bit_ch0, '.-')
        plt.plot(data_16bit_ch1, '.--')
        plt.show()
    # Only disarm trigger after data is collected
    ADQAPI.ADQ_DisarmTrigger(adq_cu, adq_num)
    ADQAPI.ADQ_MultiRecordClose(adq_cu, adq_num);
    # Delete ADQControlunit
    ADQAPI.DeleteADQControlUnit(adq_cu);
    print('Done')
else:
    print('No ADQ connected.')
# This can be used to completely unload the DLL in Windows
#ct.windll.kernel32.FreeLibrary(ADQAPI._handle)
| thomasbarillot/DAQ | HHGMonitor/ADQAPI_python/SDR14_Playlist_example.py | Python | mit | 8,460 |
from .documents import *
from .notes import *
from .topics import *
| editorsnotes/editorsnotes | editorsnotes/main/models/__init__.py | Python | agpl-3.0 | 68 |
from __future__ import absolute_import
from sentry.testutils import TestCase
from sentry.utils.safe import safe_execute, trim, trim_dict
# Shared fixture: a 1024-char string that trim() should cut to ~512 chars
# (see the assertions in TrimTest below).
a_very_long_string = 'a' * 1024
class TrimTest(TestCase):
    """Checks `trim` truncation of oversized strings and containers."""

    def test_simple_string(self):
        expected = a_very_long_string[:509] + '...'
        assert trim(a_very_long_string) == expected

    def test_list_of_strings(self):
        trimmed = trim([a_very_long_string, a_very_long_string])
        assert trimmed == [a_very_long_string[:507] + '...']
class TrimDictTest(TestCase):
    """Checks that `trim_dict` caps the number of entries in place."""

    def test_large_dict(self):
        value = {k: k for k in xrange(500)}
        trim_dict(value)
        assert len(value) == 50
class SafeExecuteTest(TestCase):
    """Exercises `safe_execute`: returns the result, or None on exception."""

    def test_with_nameless_function(self):
        assert safe_execute(lambda a: a, 1) == 1
        # `a` is undefined here, so the lambda raises NameError at call time;
        # safe_execute must swallow it and return None.
        assert safe_execute(lambda: a) is None # NOQA

    def test_with_simple_function(self):
        def simple(a):
            return a
        assert safe_execute(simple, 1) == 1
        # Redefine `simple` so the same call now raises; result must be None.
        def simple(a):
            raise Exception()
        assert safe_execute(simple, 1) is None

    def test_with_instance_method(self):
        class Foo(object):
            def simple(self, a):
                return a
        assert safe_execute(Foo().simple, 1) == 1
        # Redefined class whose method raises; bound methods are handled too.
        class Foo(object):
            def simple(self, a):
                raise Exception()
        assert safe_execute(Foo().simple, 1) is None
| jokey2k/sentry | tests/sentry/utils/test_safe.py | Python | bsd-3-clause | 1,381 |
# (c) Copyright 2018-2019 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .playbooks import run_playbook
from .plays import get_metadata_file
from flask import abort
from flask import Blueprint
from flask import jsonify
from flask import request
import itertools
import json
import os
from os.path import dirname
from os.path import exists
from os.path import join
from oslo_config import cfg
from oslo_log import log as logging
import re
import subprocess
from . import policy
from time import sleep
LOG = logging.getLogger(__name__)

bp = Blueprint('packages', __name__)

# On-disk cache mapping timestamped venv package names to versions (kept
# across runs), and the temp file the package-data playbook writes.
PKG_CACHE_FILE = cfg.CONF.paths.packages_cache
HOST_PKGS_FILE = cfg.CONF.paths.packages_hosts_data
# Name of the ansible play that gathers package data from all model hosts.
PACKAGES_PLAY = "_ardana-service-get-pkgdata"
@bp.route("/api/v2/packages", methods=['GET'])
@policy.enforce('lifecycle:list_packages')
def get_packages():
    """Get installed venv packages and SUSE-Openstack-installed packages

    This caches the ardana and venv-openstack packages installed on the
    deployer and returns a list of ardana packages.

    .. :quickref: Packages; list ardana packages and openstack venv versions

    **Example Request**:

    .. sourcecode:: http

       GET /api/v2/packages HTTP/1.1
       Content-Type: application/json

    **Example Response**:

    .. sourcecode:: http

       HTTP/1.1 200 OK

       {
           "cloud_installed_packages": [{
               "name": "ardana-logging",
               "versions": ["8.0+git.1531134017.565cede-102.1"]
           }, {
               "name": "ardana-nova",
               "versions": ["8.0+git.1528891405.336a954-103.6"]
           }, ... <and so on>],
           "openstack_venv_packages": [{
               "available": "2.2.1-19.116",
               "installed": ["2.2.1-9.1", "2.2.1-19.116"],
               "name": "monasca"
           }, {
               "available": "9.0.2-19.124",
               "installed": ["9.0.2-19.124"],
               "name": "ceilometer"
           }, ... <and so on>]
       }
    """
    # In test mode, serve a canned response instead of touching the system.
    if cfg.CONF.testing.use_mock:
        mock_json = "tools/packages.json"
        json_file = join(dirname(dirname(__file__)), mock_json)
        with open(json_file) as f:
            return jsonify(json.load(f))
    installed_os_pkgs, os_pkg_cache = update_openstack_pkg_cache()
    # Run the playbook to get package data from all the hosts in the model
    proc_info = {}
    try:
        vars = {
            "extra-vars": {
                "host_pkgs_file": HOST_PKGS_FILE
            }
        }
        # encrypt is needed to run playbook if cloud config is encrypted.
        # It is passed in as a header because there is no body in HTTP GET
        # API.
        encrypt = request.headers.get('encrypt')
        if encrypt:
            vars['extra-vars']['encrypt'] = encrypt
        play_id = run_playbook(PACKAGES_PLAY, vars)["id"]
        # Poll for "code" and ignore its value because some hosts may be down.
        while 'code' not in proc_info:
            with open(get_metadata_file(play_id)) as f:
                proc_info = json.load(f)
            if 'code' not in proc_info:
                sleep(1)
    except Exception as e:
        LOG.error("Could not get remote package information: %s" % e)
        abort(404, "Remote package information unavailable")
    # host_pkgs example structure created by PACKAGES_PLAY playbook run:
    # {
    #    "host1": {
    #        # list of installed timestamped openstack venv packages on host1
    #        "ts_os_pkgs": [
    #            "barbican-20180820T201055Z",
    #            "cinder-20180820T190043Z", ...
    #        ],
    #        # list of SUSE-Openstack-cloud packages installed on host1
    #        "zypper_cloud_pkgs": {
    #            "python-PasteDeploy": "1.5.2-1.52",
    #            "python-pymongo": "3.1.1-1.55", ...
    #        }
    #    },
    #    "host2": { ... }
    # }
    try:
        with open(HOST_PKGS_FILE) as f:
            host_pkgs = json.load(f)
    except Exception as e:
        LOG.error("Could not retrieve remote host pkg data from %s: %s"
                  % (HOST_PKGS_FILE, e))
        abort(404, "Remote package information unavailable")
    finally:
        # The playbook's output file is transient; always clean it up.
        if exists(HOST_PKGS_FILE):
            os.remove(HOST_PKGS_FILE)
    # Reconcile openstack timestamps to versions installed on each system
    all_ts_os_pkgs = [host['ts_os_pkgs'] for host in host_pkgs.values()]
    uniq_ts_pkgs = set(itertools.chain.from_iterable(all_ts_os_pkgs))
    re_name_ts = re.compile(r'(?P<name>[\w-]+)-\d+T\d+Z')
    for pkg in uniq_ts_pkgs:
        pkg_match = re_name_ts.match(pkg)
        if not pkg_match:
            LOG.warning('Unrecognized package format: %s' % pkg)
            continue
        name = pkg_match.group('name')
        if not installed_os_pkgs.get(name):
            LOG.warning('Unrecognized service name: %s' % name)
            continue
        version = os_pkg_cache.get(pkg)
        if version:
            installed_os_pkgs[name]['installed'].append(version)
        else:
            # We don't know what version this is, so we'll just add
            # the timestamped package name in there (should never happen)
            installed_os_pkgs[name]['installed'].append(pkg)
    ovp = [
        {
            'name': k,
            'installed': v['installed'],
            'available': v['available']
        } for k, v in installed_os_pkgs.items()]
    # Create a list of unique SUSE-Openstack installed packages across all
    # systems
    pkgs_dict = {}
    for host in host_pkgs.values():
        for name, version in host['zypper_cloud_pkgs'].iteritems():
            if name not in pkgs_dict:
                pkgs_dict[name] = [version]
            elif version not in pkgs_dict[name]:
                # this case might only occur during upgrade or partial upgrade
                pkgs_dict[name].append(version)
    cip = [
        {
            'name': name,
            'versions': versions
        } for name, versions in pkgs_dict.items()
    ]
    response = {
        'openstack_venv_packages': ovp,
        'cloud_installed_packages': cip
    }
    return jsonify(response)
def update_openstack_pkg_cache():
    """Scan zypper for venv-openstack packages and refresh the on-disk cache.

    Returns:
        A (installed_os_pkgs, os_pkg_cache) tuple -- see the inline comments
        below for the exact shapes.
    """
    re_openstack = re.compile(r'venv-openstack-(?P<name>[\w-]+)-')
    # contains current AND OLD openstack packages where
    # k: timestamped package (i.e. monasca-20180820T190346Z)
    # v: version (i.e. 2.2.1-19.155)
    # This will build up over time with patches and upgrades
    os_pkg_cache = {}
    # contains current/available openstack packages installed on the deployer
    # k: openstack name (i.e. monasca)
    # v: version (i.e. 2.2.1-19.155)
    installed_os_pkgs = {}
    # Load package cache (best effort -- a missing cache just starts empty).
    try:
        with open(PKG_CACHE_FILE) as f:
            os_pkg_cache = json.load(f)
    except Exception as e:
        LOG.info("Could not load %s: %s." % (PKG_CACHE_FILE, e))
    # TODO(choyj): The code below could be simplified by using the zypper data
    # from the output of PACKAGES_PLAY. But we do not know which model host is
    # the deployer other than via educated guess (only deployer has venv pkgs
    # installed). So, for now:
    # See what openstack packages are installed on this deployer
    try:
        p = subprocess.Popen(['zypper', '--terse', 'packages', '--installed'],
                             stdout=subprocess.PIPE)
        zyp_lines = p.communicate()[0].decode('utf-8').split('\n')
    except OSError:
        LOG.error("zypper unavailable or not working on this system")
        abort(503, 'zypper unavailable on this host')
    # zypper --terse emits '|'-separated columns:
    # status | repo | name | version | arch; 'i' in status means installed.
    for line in zyp_lines:
        fields = line.split('|')
        # if this is a valid line and the package is installed
        if len(fields) == 5 and 'i' in fields[0]:
            name = fields[2].strip()
            vers = fields[3].strip()
            os_match = re_openstack.match(name)
            if os_match:
                # a venv-openstack package, therefore figure out timestamped
                # package to update/add to os_pkg_cache by listing the RPM's
                # files and looking for the shipped <name>-<timestamp>Z.tgz.
                name_vers = "%s-%s" % (name, vers)
                try:
                    p = subprocess.Popen(
                        ['rpm', '--query', '--list', name_vers],
                        stdout=subprocess.PIPE)
                    rpm_lines = p.communicate()[0].split('\n')
                    project = os_match.group('name')
                    re_ts_pkg = \
                        re.compile(r"/(?P<name_ts>%s-\d+T\d+Z).tgz$" % project)
                    for rpm_line in rpm_lines:
                        ts_pkg_match = re_ts_pkg.search(rpm_line)
                        if ts_pkg_match:
                            os_pkg_cache[ts_pkg_match.group('name_ts')] = vers
                            installed_os_pkgs[project] = {
                                'available': vers,
                                'installed': []
                            }
                            break
                except OSError as e:
                    LOG.warning("Could not determine timestamped package for"
                                " %s: %s" % (name_vers, e))
    # Save package cache (best effort -- failure only costs a rescan later).
    try:
        with open(PKG_CACHE_FILE, 'w') as f:
            json.dump(os_pkg_cache, f, indent=4, sort_keys=True)
    except Exception as e:
        LOG.info("Could not save %s: %s." % (PKG_CACHE_FILE, e))
    return installed_os_pkgs, os_pkg_cache
| ArdanaCLM/ardana-service | ardana_service/packages.py | Python | apache-2.0 | 9,927 |
"""Unit test for KNX string object."""
import pytest
from xknx.dpt import DPTString
from xknx.exceptions import ConversionError
class TestDPTString:
    """Test class for KNX string (DPT 16) conversions.

    A DPT 16 frame is always 14 octets; unused trailing octets are zero and
    the payload is latin-1 encoded, so the expected raw frames are derived
    from byte literals instead of hand-written hex tuples.
    """

    def test_value_from_documentation(self):
        """Parse and stream the example from the KNX documentation."""
        string = "KNX is OK"
        raw = tuple(b"KNX is OK") + (0x00,) * 5
        assert DPTString.to_knx(string) == raw
        assert DPTString.from_knx(raw) == string

    def test_value_empty_string(self):
        """Parse and stream the empty string (all-zero frame)."""
        raw = (0x00,) * 14
        assert DPTString.to_knx("") == raw
        assert DPTString.from_knx(raw) == ""

    def test_value_max_string(self):
        """Parse and stream a maximum-length (14 character) string."""
        string = "AAAAABBBBBCCCC"
        raw = tuple(string.encode("latin_1"))
        assert DPTString.to_knx(string) == raw
        assert DPTString.from_knx(raw) == string

    def test_value_special_chars(self):
        """Parse and stream a string with latin-1 special characters."""
        string = "Hey!?$ ÄÖÜäöüß"
        raw = tuple(string.encode("latin_1"))
        assert DPTString.to_knx(string) == raw
        assert DPTString.from_knx(raw) == string

    def test_to_knx_invalid_chars(self):
        """Characters outside latin-1 are streamed as '?'."""
        raw = tuple(b"Matou?") + (0x00,) * 8
        assert DPTString.to_knx("Matouš") == raw
        assert DPTString.from_knx(raw) == "Matou?"

    def test_to_knx_too_long(self):
        """Streaming a string longer than 14 characters must fail."""
        with pytest.raises(ConversionError):
            DPTString.to_knx("AAAAABBBBBCCCCx")

    def test_from_knx_wrong_parameter_too_large(self):
        """Parsing a frame with more than 14 octets must fail."""
        with pytest.raises(ConversionError):
            DPTString.from_knx((0x00,) * 15)

    def test_from_knx_wrong_parameter_too_small(self):
        """Parsing a frame with fewer than 14 octets must fail."""
        with pytest.raises(ConversionError):
            DPTString.from_knx((0x00,) * 13)
| XKNX/xknx | test/dpt_tests/dpt_string_test.py | Python | mit | 3,926 |
import django.db.models as m
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from taggit.managers import TaggableManager
class Donation(m.Model):
    """Record of a donation.
    `compliance_information` is a JSON-structured dictionary of information
    needed when producing compliance reports, e.g.::
        { "occupation": ...
        , "employer": ...
        , "compliance_statement": True
        }
    """
    # (stored value, human-readable label) pairs for `payment_method`.
    PAYMENT_METHOD_CHOICES = [
        ('CC', 'Credit Card'),
        ('PayPal', 'PayPal'),
        ('Check', 'Check'),
        ('Cash', 'Cash'),
    ]
    # Generic foreign key: the donor may be an instance of any model
    # (presumably a contact-like model; confirm against callers).
    content_type = m.ForeignKey(ContentType)
    object_id = m.PositiveIntegerField()
    contact = generic.GenericForeignKey('content_type', 'object_id')
    # Donation amount; up to 10 digits with 2 decimal places.
    amount = m.DecimalField(max_digits=10, decimal_places=2)
    payment_method = m.CharField(max_length=10, choices=PAYMENT_METHOD_CHOICES)
    # JSON blob (see class docstring); stored as raw text, parsed by callers.
    compliance_information = m.TextField()
    # Set automatically when the row is first created.
    timestamp = m.DateTimeField(auto_now_add=True)
| actwithus/actwithus | awufundraiser/awufundraiser/models.py | Python | apache-2.0 | 1,028 |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 1 18:05:30 2017
@author: 53771
"""
import loadStock as ls
import tushare as ts
from datetime import datetime
import matplotlib.pyplot as plt
import pandas_candlestick_ohlc as pohlc
import pandas as pd
# Load historical quote data for ticker '000001' via the project loader.
# NOTE(review): the variable is named "Vanke" but 000001 is Ping An Bank's
# code (Vanke is 000002) — confirm which ticker is intended.
Vanke=ls.read_hit_data('000001')
"""import os
import uuid
from ..tools import row2dict, xls_reader
from datetime import datetime
from sqlalchemy import not_, func
from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
import colander
from deform import (
Form,
widget,
ValidationFailure,
)
from ..models import (
DBSession,
Resource
)
from datatables import ColumnDT, DataTables
from ..views.base_view import BaseViews
SESS_ADD_FAILED = 'Tambah resource gagal'
SESS_EDIT_FAILED = 'Edit resource gagal'
class AddSchema(colander.Schema):
resource_name = colander.SchemaNode(
colander.String())
resource_type = colander.SchemaNode(
colander.String())
class EditSchema(AddSchema):
id = colander.SchemaNode(colander.String(),
missing=colander.drop,
widget=widget.HiddenWidget(readonly=True))
class view_resource(BaseViews):
########
# List #
########
@view_config(route_name='resource', renderer='templates/resource/list.pt',
permission='read')
def view_list(self):
return dict(a={})
##########
# Action #
##########
@view_config(route_name='resource-act', renderer='json',
permission='read')
def resource_act(self):
ses = self.request.session
req = self.request
params = req.params
url_dict = req.matchdict
if url_dict['act']=='grid':
columns = []
columns.append(ColumnDT('resource_id'))
columns.append(ColumnDT('resource_name'))
columns.append(ColumnDT('resource_type'))
columns.append(ColumnDT('ordering'))
query = DBSession.query(Resource)
rowTable = DataTables(req, Resource, query, columns)
return rowTable.output_result()
#######
# Add #
#######
def form_validator(self, form, value):
if 'id' in form.request.matchdict:
uid = form.request.matchdict['id']
q = DBSession.query(Resource).filter_by(id=uid)
resource = q.first()
else:
resource = None
def get_form(self, class_form, row=None):
schema = class_form(validator=self.form_validator)
schema = schema.bind()
schema.request = self.request
if row:
schema.deserialize(row)
return Form(schema, buttons=('simpan','batal'))
def save(self, values, user, row=None):
if not row:
row = Resource()
row.created = datetime.now()
row.create_uid = user.id
row.from_dict(values)
row.updated = datetime.now()
row.update_uid = user.id
row.disabled = 'disabled' in values and values['disabled'] and 1 or 0
DBSession.add(row)
DBSession.flush()
return row
def save_request(self, values, row=None):
if 'id' in self.request.matchdict:
values['id'] = self.request.matchdict['id']
row = self.save(values, self.request.user, row)
self.request.session.flash('resource sudah disimpan.')
def route_list(self):
return HTTPFound(location=self.request.route_url('resource'))
def session_failed(self, session_name):
r = dict(form=self.session[session_name])
del self.session[session_name]
return r
@view_config(route_name='resource-add', renderer='templates/resource/add.pt',
permission='add')
def view_resource_add(self):
req = self.request
ses = self.session
form = self.get_form(AddSchema)
if req.POST:
if 'simpan' in req.POST:
controls = req.POST.items()
try:
c = form.validate(controls)
except ValidationFailure, e:
req.session[SESS_ADD_FAILED] = e.render()
return HTTPFound(location=req.route_url('resource-add'))
self.save_request(dict(controls))
return self.route_list()
elif SESS_ADD_FAILED in req.session:
return self.session_failed(SESS_ADD_FAILED)
return dict(form=form.render())
########
# Edit #
########
def query_id(self):
return DBSession.query(Resource).filter_by(id=self.request.matchdict['id'])
def id_not_found(self):
msg = 'resource ID %s Tidak Ditemukan.' % self.request.matchdict['id']
request.session.flash(msg, 'error')
return route_list()
@view_config(route_name='resource-edit', renderer='templates/resource/edit.pt',
permission='edit')
def view_resource_edit(self):
request = self.request
row = self.query_id().first()
if not row:
return id_not_found(request)
form = self.get_form(EditSchema)
if request.POST:
if 'simpan' in request.POST:
controls = request.POST.items()
print controls
try:
c = form.validate(controls)
except ValidationFailure, e:
request.session[SESS_EDIT_FAILED] = e.render()
return HTTPFound(location=request.route_url('resource-edit',
id=row.id))
self.save_request(dict(controls), row)
return self.route_list()
elif SESS_EDIT_FAILED in request.session:
return self.session_failed(SESS_EDIT_FAILED)
values = row.to_dict()
return dict(form=form.render(appstruct=values))
##########
# Delete #
##########
@view_config(route_name='resource-delete', renderer='templates/resource/delete.pt',
permission='delete')
def view_resource_delete(self):
request = self.request
q = self.query_id()
row = q.first()
if not row:
return self.id_not_found(request)
form = Form(colander.Schema(), buttons=('hapus','batal'))
if request.POST:
if 'hapus' in request.POST:
msg = 'resource ID %d %s sudah dihapus.' % (row.id, row.description)
try:
q.delete()
DBSession.flush()
except:
msg = 'resource ID %d %s tidak dapat dihapus.' % (row.id, row.description)
request.session.flash(msg)
return self.route_list()
return dict(row=row,
form=form.render())
""" | aagusti/sp2d | sp2d/views/admin/m_resource.py | Python | mit | 6,833 |
#!/usr/bin/env python
# Packaging script for the `nirvana` library (nirvanahq.com API client).
import nirvana
# Prefer setuptools; fall back to distutils on minimal installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# The README doubles as the PyPI long description.
# NOTE(review): opened with the locale's default encoding; an explicit
# encoding can't be passed here because Py2.6/2.7 are still supported.
with open('README.rst') as readme:
    long_description = readme.read()
setup(
    name='nirvana',
    # Single-source the version from the package itself.
    version=nirvana.__version__,
    description=('Library for interacting with the Nirvana task manager '
                 '(nirvanahq.com)'),
    long_description=long_description,
    author='Nick Wilson',
    author_email='nick@njwilson.net',
    url='http://nirvana-python.readthedocs.org',
    license='MIT',
    packages=['nirvana'],
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| njwilson/nirvana-python | setup.py | Python | mit | 1,039 |
#!/usr/bin/env python
# Copyright (C) 2006-2019 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import sys
import numpy as np
import essentia.standard as es
from essentia import array as esarr
sys.path.insert(0, './')
from qa_test import *
from qa_testevents import QaTestEvents
# Analysis parameters shared by both saturation-detector implementations.
sampleRate = 44100.
frameSize = 512
hopSize = 256
minimumDuration = 0.005  # seconds (5 ms) — regions shorter than this are discarded
# Module-level state.
# NOTE(review): these two globals appear unused — DevWrap keeps its own
# `previousRegion` class attribute and a local frame counter; confirm.
idx = 0
previousRegion = None
class EssentiaWrap(QaWrapper):
    """
    Essentia Solution: thin wrapper around es.SaturationDetector.
    """
    algo = es.SaturationDetector(frameSize=frameSize, hopSize=hopSize)

    def compute(self, *args):
        """Return the start times of saturated regions found in args[1]."""
        audio = args[1]
        onsets = []
        self.algo.reset()
        frames = es.FrameGenerator(audio, frameSize=frameSize,
                                   hopSize=hopSize, startFromZero=True)
        for frame in frames:
            starts, _ends = self.algo(frame)
            onsets.extend(starts)
        return esarr(onsets)
class DevWrap(QaWrapper):
    """
    Development Solution.

    Pure Python/NumPy reference implementation: a sample is "saturated" when
    its absolute value exceeds 0.9 while its sample-to-sample delta stays
    below 0.01; contiguous saturated runs longer than `minimumDuration`
    are reported by their start time (seconds).
    """
    # Start time (s) of a saturated region still open from the previous frame.
    previousRegion = None

    def compute(self, *args):
        """Return the start times (s) of saturated regions found in args[1]."""
        x = args[1]
        y = []
        frame_idx = 0  # BUG FIX: the frame counter used to be `idx`, which the
        # flank loop below clobbered, corrupting all subsequent timestamps.
        for frame in es.FrameGenerator(x, frameSize=frameSize,
                                       hopSize=hopSize,
                                       startFromZero=True):
            frame = np.abs(frame)
            starts = []
            ends = []
            # Only the central hop-sized slice of each frame is inspected so
            # overlapping frames do not report the same samples twice.
            s = int(frameSize // 2 - hopSize // 2) - 1
            e = int(frameSize // 2 + hopSize // 2)
            delta = np.diff(frame)
            delta = np.insert(delta, 0, 0)
            energyMask = np.array([v > .9 for v in frame])[s:e].astype(int)
            deltaMask = np.array([np.abs(v) < .01 for v in delta])[s:e].astype(int)
            combinedMask = energyMask * deltaMask
            flanks = np.diff(combinedMask)
            uFlanks = [i for i, v in enumerate(flanks) if v == 1]
            dFlanks = [i for i, v in enumerate(flanks) if v == -1]
            if self.previousRegion and dFlanks:
                # Close the region left open by the previous frame.
                start = self.previousRegion
                end = (frame_idx * hopSize + dFlanks[0] + s) / sampleRate
                # BUG FIX: was `start - end` (always negative), which silently
                # discarded every region spanning a frame boundary.
                duration = end - start
                if duration > minimumDuration:
                    starts.append(start)
                    ends.append(end)
                self.previousRegion = None
                del dFlanks[0]
            # BUG FIX: `is not` compared int identity, not equality.
            if len(dFlanks) != len(uFlanks):
                # Trailing up-flank with no down-flank yet: keep it open.
                self.previousRegion = (frame_idx * hopSize + uFlanks[-1] + s) / sampleRate
                del uFlanks[-1]
            if len(dFlanks) != len(uFlanks):
                # NOTE(review): EssentiaException is assumed to be provided by
                # the `from qa_test import *` star import — confirm.
                raise EssentiaException(
                    "At this point uFlanks ({}) and dFlanks ({}) "
                    "are expected to have the same length!".format(len(dFlanks),
                                                                   len(uFlanks)))
            for k in range(len(uFlanks)):
                # BUG FIX: the original indexed with the loop variable `idx`,
                # mixing up the frame offset and the flank index.
                start = float(frame_idx * hopSize + uFlanks[k] + s) / sampleRate
                end = float(frame_idx * hopSize + dFlanks[k] + s) / sampleRate
                duration = end - start
                if duration > minimumDuration:
                    starts.append(start)
                    ends.append(end)
            y.extend(starts)
            frame_idx += 1
        return esarr(y)
if __name__ == '__main__':
    # Output directory for logs produced by the QA framework.
    folder = 'saturationdetector'
    # Instantiate the two solutions under comparison ('events'-type wrappers).
    wrappers = [
        DevWrap('events'),
        EssentiaWrap('events'),
    ]
    # Instantiate the event-based QA test harness.
    qa = QaTestEvents(verbose=True)
    # Register the wrappers with the harness.
    qa.set_wrappers(wrappers)
    data_dir = '../../audio/recorded/distorted.wav'
    qa.load_audio(filename=data_dir)  # works for a single file
    qa.load_solution(data_dir, ground_true=True)
    # Compute the results, the scores, and compare the computation times.
    qa.compute_all(output_file='{}/compute.log'.format(folder))
    # TODO Generate Ground truth to test this.
    for sol, val in qa.solutions.items():
        print('{}'.format(sol))
        for v in val:
            print('{:.3f}'.format(v))
| carthach/essentia | test/src/QA/saturationdetector/test_saturationdetector.py | Python | agpl-3.0 | 4,902 |
# Event categories recognised by the admin log; 'all' appears to act as the
# wildcard/no-filter option while the rest name individual event types
# (confirm against the log-filtering views that consume this list).
LOG_EVENTS = ['all', 'login', 'active', 'create', 'delete', 'disable', 'update', 'backup']
| villaverde/iredadmin | libs/panel/__init__.py | Python | gpl-2.0 | 114 |
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from . import models, wizard
| OCA/sale-reporting | sale_backorder/__init__.py | Python | agpl-3.0 | 94 |
#
# Tuples.py
# HackerRank
#
# Created by Fabrizio Duroni on 16/10/17.
#
# https://www.hackerrank.com/challenges/python-tuples
# Python 2 script (raw_input): read the element count, then the elements,
# and print the hash of the resulting tuple.
n = int(raw_input())  # element count; not used beyond consuming the line
integer_list = map(int, raw_input().split())  # Py2: map returns a list
tupla = tuple(integer_list)
print(hash(tupla))
| chicio/HackerRank | Python/Tuples.py | Python | mit | 242 |
# -*- coding: utf-8 -*-
"""
Routes for api v2 endpoints
"""
from django.conf.urls import include, url
from rest_framework import routers
from api.v2 import views
from api.base import views as base_views
# Single DRF router for all v2 endpoints; URLs are registered without a
# trailing slash.
router = routers.DefaultRouter(trailing_slash=False)
router.register(r'accounts', views.AccountViewSet, base_name='account')
router.register(
    r'allocations',
    views.AllocationViewSet,
    base_name='allocation')
router.register(r'allocation_sources', views.AllocationSourceViewSet)
router.register(r'boot_scripts', views.BootScriptViewSet)
router.register(r'credentials', views.CredentialViewSet)
router.register(r'events', views.EventViewSet, base_name='event')
# Email/support endpoints.
router.register(r'email_template', views.EmailTemplateViewSet)
router.register(r'email_feedback', views.FeedbackEmailViewSet, base_name='email-feedback')
router.register(r'email_instance_report', views.InstanceSupportEmailViewSet, base_name='instance-email-support')
router.register(r'email_volume_report', views.VolumeSupportEmailViewSet, base_name='volume-email-support')
router.register(r'email_request_resources', views.ResourceEmailViewSet, base_name='email-request-resources')
# Staff emulation endpoints.
router.register(r'emulate_token', views.TokenEmulateViewSet, base_name='emulate-token')
router.register(r'emulate_session', views.SessionEmulateViewSet, base_name='emulate-session')
router.register(r'help_links', views.HelpLinkViewSet)
router.register(r'identities', views.IdentityViewSet)
router.register(r'identity_memberships', views.IdentityMembershipViewSet, base_name='identitymembership')
# NOTE: 'images' is routed to the Application model (base_name='application').
router.register(r'images', views.ImageViewSet, base_name='application')
router.register(r'image_bookmarks', views.ImageBookmarkViewSet)
router.register(r'image_tags', views.ImageTagViewSet)
router.register(
    r'image_versions',
    views.ImageVersionViewSet,
    base_name='imageversion')
router.register(
    r'image_version_licenses',
    views.ImageVersionLicenseViewSet,
    base_name='imageversion_license')
router.register(
    r'image_version_memberships',
    views.ImageVersionMembershipViewSet,
    base_name='imageversion_membership')
router.register(
    r'image_version_boot_scripts',
    views.ImageVersionBootScriptViewSet,
    base_name='imageversion_bootscript')
router.register(r'instances', views.InstanceViewSet, base_name='instance')
router.register(r'instance_actions',
                views.InstanceActionViewSet,
                base_name='instanceaction')
router.register(r'instance_histories',
                views.InstanceStatusHistoryViewSet,
                base_name='instancestatushistory')
router.register(r'instance_tags', views.InstanceTagViewSet)
router.register(r'licenses', views.LicenseViewSet)
router.register(r'links', views.ExternalLinkViewSet)
router.register(r'machine_requests', views.MachineRequestViewSet)
router.register(r'maintenance_records', views.MaintenanceRecordViewSet)
router.register(r'metrics', views.MetricViewSet)
router.register(r'platform_types', views.PlatformTypeViewSet)
# Project endpoints and their nested resources.
router.register(r'projects', views.ProjectViewSet)
router.register(r'project_links', views.ProjectExternalLinkViewSet, base_name='projectlinks')
router.register(r'project_images', views.ProjectApplicationViewSet)
router.register(r'project_instances', views.ProjectInstanceViewSet)
router.register(r'project_volumes', views.ProjectVolumeViewSet)
router.register(r'providers', views.ProviderViewSet)
router.register(
    r'provider_machines',
    views.ProviderMachineViewSet,
    base_name='providermachine')
router.register(r'provider_types', views.ProviderTypeViewSet, base_name='providertype')
router.register(r'quotas', views.QuotaViewSet)
router.register(r'resource_requests', views.ResourceRequestViewSet)
router.register(r'reporting', views.ReportingViewSet, base_name='reporting')
router.register(r'sizes', views.SizeViewSet)
router.register(r'status_types', views.StatusTypeViewSet)
router.register(r'tags', views.TagViewSet)
router.register(r'token_update', views.TokenUpdateViewSet, base_name='token_update')
router.register(r'tokens', views.TokenViewSet, base_name='token')
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet, base_name='group')
router.register(r'volumes', views.VolumeViewSet, base_name='volume')
router.register(r'ssh_keys', views.SSHKeyViewSet, base_name='ssh_key')
# Version endpoints come from the shared base API app.
router.register(r'version', base_views.VersionViewSet,
                base_name='version-atmo')
router.register(r'deploy_version', base_views.DeployVersionViewSet,
                base_name='version-deploy')
api_v2_urls = router.urls
# Mount every registered route at the app root.
urlpatterns = [url(r'^', include(api_v2_urls))]
| CCI-MOC/GUI-Backend | api/v2/urls.py | Python | apache-2.0 | 4,611 |
def Working_out_current(Junction, Power_dictionary, Dont_do_coming_from_self=False):
    """Sum the current arriving at *Junction*.

    *Junction* is a ``[coords, index]`` pair; its Power_dictionary entry is
    keyed by ``(tuple(coords), index)``.  Each value of the junction's
    ``'current coming from'`` mapping is an ``[amps, flag]`` pair.  When
    ``Dont_do_coming_from_self`` is true, only entries whose flag is truthy
    contribute; entries with a falsy flag are ignored.
    """
    junction_key = (tuple(Junction[0]), Junction[1])
    feeds = Power_dictionary[junction_key]['current coming from'].values()
    if Dont_do_coming_from_self:
        feeds = [feed for feed in feeds if feed[1]]
    return sum(feed[0] for feed in feeds)
def Working_out_Support_current(Junction, Destination, Power_dictionary):
    """Sum the support current flowing from *Junction* toward *Destination*.

    Only entries of the junction's ``'current coming from Support'`` mapping
    whose destination field (``entry[1]``) equals *Destination* are counted;
    ``entry[0]`` holds the amperage.  Returns 0 when nothing matches.
    """
    junction_key = (tuple(Junction[0]), Junction[1])
    supports = Power_dictionary[junction_key]['current coming from Support'].values()
    return sum(entry[0] for entry in supports if entry[1] == Destination)
def Working_out_resistance(Junction, Power_dictionary):
    """Parallel-combine the 'Resistance from cabeis' entries at *Junction*.

    With several entries the usual parallel formula ``1 / sum(1/R)`` is used
    and zero resistances are skipped; with a single entry its value is
    returned as-is.  Returns 0 for an unknown junction or when every entry
    is zero.
    """
    junction_key = (tuple(Junction[0]), Junction[1])
    combined = 0
    if junction_key in Power_dictionary:
        entries = Power_dictionary[junction_key]['Resistance from cabeis']
        if len(entries) > 1:
            reciprocal_sum = sum(1 / entry[0] for entry in entries.values() if entry[0])
            combined = 1 / reciprocal_sum if reciprocal_sum else 0
        else:
            for entry in entries.values():
                combined = entry[0]
    return combined
def Working_out_all_resistance(Junction, Power_dictionary):
    """Return the combined parallel resistance seen at *Junction*.

    Combines the junction's 'Resistance from cabeis' entries and, when
    present, its 'Parallel Resistance from cabeis' entries.  Zero/absent
    resistances are skipped.  Returns 0 when the junction is unknown or no
    usable resistance exists.
    """
    ResistanceX_all = 0
    Resistance = 0  # running total of reciprocal resistance (1/R)
    tuple_Junction = tuple([tuple(Junction[0]), Junction[1]])
    if tuple_Junction in Power_dictionary:
        cable_entries = Power_dictionary[tuple_Junction]['Resistance from cabeis']
        if len(cable_entries) > 1:
            for Resistance_pop in cable_entries.values():
                if Resistance_pop[0]:
                    ResistanceX_all += 1 / Resistance_pop[0]
            Resistance = ResistanceX_all
        else:
            for value in cable_entries.values():
                if value[0]:
                    Resistance = 1 / value[0]
        if 'Parallel Resistance from cabeis' in Power_dictionary[tuple_Junction]:
            parallel_entries = Power_dictionary[tuple_Junction]['Parallel Resistance from cabeis']
            if len(parallel_entries) > 1:
                ResistanceX_all = 0
                for Resistance_pop in parallel_entries.values():
                    if Resistance_pop[0]:
                        ResistanceX_all += 1 / Resistance_pop[0]
                Resistance += ResistanceX_all
            else:
                for value in parallel_entries.values():
                    if value[0]:
                        Resistance += 1 / value[0]
    # BUG FIX: `True_resistance` was only bound when Resistance was truthy,
    # so an unknown junction or all-zero entries raised UnboundLocalError on
    # return; return 0 instead (consistent with Working_out_resistance).
    if Resistance:
        return 1 / Resistance
    return 0
def Working_out_resistance_Modified(Junction, Power_dictionary):
    """Parallel-combine the 'Resistance from modified' entries at *Junction*.

    Same rules as Working_out_resistance but on the "modified" resistances:
    multiple entries are combined as ``1 / sum(1/R)`` with zero entries
    skipped; a single entry is returned as-is; 0 for an unknown junction.
    """
    junction_key = (tuple(Junction[0]), Junction[1])
    combined = 0
    if junction_key in Power_dictionary:
        entries = Power_dictionary[junction_key]['Resistance from modified']
        if len(entries) > 1:
            reciprocal_sum = sum(1 / entry[0] for entry in entries.values() if entry[0])
            combined = 1 / reciprocal_sum if reciprocal_sum else 0
        else:
            for entry in entries.values():
                combined = entry[0]
    return combined
def Working_out_all_resistance_Modified(Junction, Power_dictionary):
    """Return the combined parallel resistance at *Junction* using the
    "modified" entries.

    Combines 'Resistance from modified' and, when present,
    'Parallel Resistance from cabeis modified'.  Zero/absent resistances
    are skipped.  Returns 0 when the junction is unknown or no usable
    resistance exists.
    """
    ResistanceX_all = 0
    Resistance = 0  # running total of reciprocal resistance (1/R)
    tuple_Junction = tuple([tuple(Junction[0]), Junction[1]])
    if tuple_Junction in Power_dictionary:
        modified_entries = Power_dictionary[tuple_Junction]['Resistance from modified']
        if len(modified_entries) > 1:
            for Resistance_pop in modified_entries.values():
                if Resistance_pop[0]:
                    ResistanceX_all += 1 / Resistance_pop[0]
            Resistance = ResistanceX_all
        else:
            for value in modified_entries.values():
                if value[0]:
                    Resistance = 1 / value[0]
        if 'Parallel Resistance from cabeis modified' in Power_dictionary[tuple_Junction]:
            parallel_entries = Power_dictionary[tuple_Junction]['Parallel Resistance from cabeis modified']
            if len(parallel_entries) > 1:
                ResistanceX_all = 0
                for Resistance_pop in parallel_entries.values():
                    if Resistance_pop[0]:
                        ResistanceX_all += 1 / Resistance_pop[0]
                Resistance += ResistanceX_all
            else:
                for value in parallel_entries.values():
                    if value[0]:
                        Resistance += 1 / value[0]
    # BUG FIX: `True_resistance` was only bound when Resistance was truthy,
    # so an unknown junction or all-zero entries raised UnboundLocalError on
    # return; return 0 instead (consistent with the non-"all" variant).
    if Resistance:
        return 1 / Resistance
    return 0
def Battery_calculations(Battery,Power_dictionary,PPSD,is_working_backwards,Current = 0,Voltage = 0,Resistance = 0):
    """Update supply/support bookkeeping for *Battery* in the power network.

    Backwards pass (is_working_backwards true): work out the voltage the
    battery is receiving, and if it is below the battery's configured
    'Minimum_support_voltage', inject extra current (capped by
    'Maximum_watts_support' / 'Maximum_Current_support') and drain
    'Current_capacity' accordingly.  Forward pass: just record the supplied
    voltage/current and run the charge logic.

    Mutates Power_dictionary (supply/support entries) and PPSD (capacity)
    in place; returns None.
    """
    #print(Battery,'yooooo?s')
    Battery_tuple = tuple([tuple(Battery[0]),Battery[1]])
    if is_working_backwards:
        if Battery_tuple in PPSD:
            # Determine what the battery is currently receiving.
            if 'Receiving voltage' in Power_dictionary[Battery_tuple]:
                current_supplying_at_Receiving_voltage = Current
                Receiving_voltage = Voltage
                Battery_charge(Battery,
                    Power_dictionary,
                    PPSD,
                    is_working_backwards,
                    Receiving_voltage
                    )
            elif 'Format_for_sub_syston' in Power_dictionary[Battery_tuple]:
                # Pull resistance/current from the sub-system's top node.
                sub_syston_TOP = Power_dictionary[Battery_tuple]['sub syston TOP'][0]
                sub_syston_TOP_tuple = tuple([tuple(sub_syston_TOP[0]),sub_syston_TOP[1]])
                Resistance = Power_dictionary[sub_syston_TOP_tuple]['sub syston TOP'][1]
                current_supplying_at_Receiving_voltage = Power_dictionary[sub_syston_TOP_tuple]['sub syston TOP'][2]
                Receiving_voltage = current_supplying_at_Receiving_voltage*Resistance
            else:
                current_supplying_at_Receiving_voltage = 0
                Receiving_voltage = 0
            #print(Receiving_voltage,'Minimum_support_voltage', PPSD[Battery_tuple]['Minimum_support_voltage'])
            if Receiving_voltage < PPSD[Battery_tuple]['Minimum_support_voltage']:
                #print('yo adding',PPSD[Battery_tuple]['Standard_supplying_voltage'])
                # Work out how much extra current/watts the battery must add
                # to lift the node to its standard supplying voltage.
                # NOTE(review): Resistance may still be the default 0 here in
                # the no-receiving-info branch, which would divide by zero —
                # confirm callers always pass a non-zero Resistance.
                Pulling_Voltage = PPSD[Battery_tuple]['Standard_supplying_voltage'] - Receiving_voltage
                Required_current = PPSD[Battery_tuple]['Standard_supplying_voltage']/Resistance
                Receiving_watts = Receiving_voltage * current_supplying_at_Receiving_voltage
                Required_watts = PPSD[Battery_tuple]['Standard_supplying_voltage'] * Required_current
                Pulling_watts = Required_watts - Receiving_watts
                Current_capacity = PPSD[Battery_tuple]['Current_capacity']
                adding_current = Required_current - current_supplying_at_Receiving_voltage
                # Clamp to the battery's configured support limits.
                if Pulling_watts > PPSD[Battery_tuple]['Maximum_watts_support']:
                    Pulling_watts = PPSD[Battery_tuple]['Maximum_watts_support']
                if adding_current > PPSD[Battery_tuple]['Maximum_Current_support']:
                    adding_current = PPSD[Battery_tuple]['Maximum_Current_support']
                #print(Current_capacity,Pulling_watts)
                Current_charge = Current_capacity - Pulling_watts
                #print(Current_charge)
                #print('adding_current',adding_current)
                if Current_charge <= 0:
                    # Battery fully drained by this request.
                    #print('eer')
                    if is_working_backwards:
                        PPSD[Battery_tuple]['Current_capacity'] = 0
                        if 'Format_for_sub_syston' in Power_dictionary[Battery_tuple]:
                            if 'current coming from Support' in Power_dictionary[Battery_tuple]:
                                Power_dictionary[Battery_tuple]['current coming from Support'][0] = [adding_current,sub_syston_TOP]
                                Power_dictionary[Battery_tuple]['Supply current Support'] = Working_out_Support_current(Battery,sub_syston_TOP,Power_dictionary)
                            else:
                                Power_dictionary[Battery_tuple]['current coming from Support'] = {0:[adding_current,sub_syston_TOP]}
                                Power_dictionary[Battery_tuple]['Supply current Support'] = Working_out_Support_current(Battery,sub_syston_TOP,Power_dictionary)
                            # Propagate the added current up to the sub-system top node.
                            Power_dictionary[sub_syston_TOP_tuple]['sub syston TOP'][2] = Power_dictionary[sub_syston_TOP_tuple]['sub syston TOP'][2] + adding_current
                        else:
                            if 'current coming from' in Power_dictionary[Battery_tuple]:
                                Power_dictionary[Battery_tuple]['current coming from'][0] = [adding_current,0]
                            else:
                                Power_dictionary[Battery_tuple]['current coming from'] = {0:[adding_current,0]}
                            Power_dictionary[Battery_tuple]['Supply current'] = Working_out_current(Battery,Power_dictionary)
                            Power_dictionary[Battery_tuple]['Supplying voltage'] = Power_dictionary[Battery_tuple]['Supply current'] * Resistance
                else:
                    # Battery can supply the extra current and keep charge.
                    #print('rewer')
                    if is_working_backwards:
                        PPSD[Battery_tuple]['Current_capacity'] = Current_charge
                        if 'Format_for_sub_syston' in Power_dictionary[Battery_tuple]:
                            if 'current coming from Support' in Power_dictionary[Battery_tuple]:
                                Power_dictionary[Battery_tuple]['current coming from Support'][0] = [adding_current,sub_syston_TOP]
                                Power_dictionary[Battery_tuple]['Supply current Support'] = Working_out_Support_current(Battery,sub_syston_TOP,Power_dictionary)
                            else:
                                Power_dictionary[Battery_tuple]['current coming from Support'] = {0:[adding_current,sub_syston_TOP]}
                                Power_dictionary[Battery_tuple]['Supply current Support'] = Working_out_Support_current(Battery,sub_syston_TOP,Power_dictionary)
                        else:
                            #print('yay',adding_current )
                            if 'current coming from' in Power_dictionary[Battery_tuple]:
                                Power_dictionary[Battery_tuple]['current coming from'][0] = [adding_current,0]
                            else:
                                Power_dictionary[Battery_tuple]['current coming from'] = {0:[adding_current,0]}
                            #print()
                            Power_dictionary[Battery_tuple]['Supply current'] = Working_out_current(Battery,Power_dictionary)
                            Power_dictionary[Battery_tuple]['Supplying voltage'] = Power_dictionary[Battery_tuple]['Supply current'] * Resistance
                            #print(Power_dictionary[Battery_tuple]['current coming from'])
            else:
                # Receiving voltage is sufficient: report zero support current.
                if 'Format_for_sub_syston' in Power_dictionary[Battery_tuple]:
                    if 'current coming from Support' in Power_dictionary[Battery_tuple]:
                        Power_dictionary[Battery_tuple]['current coming from Support'][0] = [0,sub_syston_TOP]
                        Power_dictionary[Battery_tuple]['Supply current Support'] = Working_out_Support_current(Battery,sub_syston_TOP,Power_dictionary)
                    else:
                        Power_dictionary[Battery_tuple]['current coming from Support'] = {0:[0,sub_syston_TOP]}
                        Power_dictionary[Battery_tuple]['Supply current Support'] = Working_out_Support_current(Battery,sub_syston_TOP,Power_dictionary)
    else:
        # Forward pass: simply record what the battery is supplying.
        Power_dictionary[Battery_tuple]['Supplying voltage'] = Voltage
        Power_dictionary[Battery_tuple]['Supply current'] = Current
    if is_working_backwards:
        # Record this battery as the sub-system top node where appropriate.
        # NOTE(review): 'Supply current' may not have been set on every path
        # reaching here — confirm a KeyError cannot occur.
        if 'Receiving voltage' in Power_dictionary[Battery_tuple]:
            Power_dictionary[Battery_tuple]['sub syston TOP'] = [
                Battery,
                Resistance,
                Power_dictionary[Battery_tuple]['Supply current']
                ]
        elif not 'Format_for_sub_syston' in Power_dictionary[Battery_tuple]:
            Power_dictionary[Battery_tuple]['sub syston TOP'] = [
                Battery,
                Resistance,
                Power_dictionary[Battery_tuple]['Supply current']
                ]
    if not is_working_backwards:
        Battery_charge(Battery,
            Power_dictionary,
            PPSD,
            is_working_backwards,
            )
def Transformer_Calculations(Transformer,Power_dictionary,PPSD,Resistance,current,voltage,is_working_backwards,Support_supply_formatting):
    """Translate secondary-side load into primary-side values for *Transformer*.

    Uses the configured 'Turn_ratio' and 'Expected_input'/'Expected_output'
    to derive the primary-side resistance that corresponds to each
    secondary-side resistance entry, storing the results under
    'Resistance from modified' / 'Parallel Resistance from cabeis modified'.
    When *voltage* is non-zero the actual secondary voltage/current are
    computed (optionally clamped by 'Voltage_limiting'/'Voltage_limited_to')
    and recorded.  Returns ``[V2, I2, R2]`` (secondary voltage, current,
    resistance).  Mutates Power_dictionary in place.
    """
    Transformer_tuple = tuple([tuple(Transformer[0]),Transformer[1]])
    # Tell downstream code to use the translated ("modified") resistances.
    Power_dictionary[Transformer_tuple]['Use Resistance from modified'] = True
    # Nominal (expected) operating point derived from the attached load.
    R2 = Resistance
    I2 = PPSD[Transformer_tuple]['Expected_output']/Resistance
    V2 = PPSD[Transformer_tuple]['Expected_output']
    Turn_ratio = PPSD[Transformer_tuple]['Turn_ratio']
    V1 = (V2*Turn_ratio)
    I1 = (V2/V1)*I2
    R1 = PPSD[Transformer_tuple]['Expected_input']/I1
    # Translate the combined cable resistance to the primary side.
    Resistance_sub = Working_out_resistance(Transformer,Power_dictionary)
    if Resistance_sub:
        R2_SUB = Resistance_sub
        I2_SUB = PPSD[Transformer_tuple]['Expected_output']/Resistance_sub
        V2_SUB = PPSD[Transformer_tuple]['Expected_output']
        V1_SUB = (V2_SUB * Turn_ratio)
        I1_SUB = (V2_SUB/V1_SUB)*I2_SUB
        R1_SUB = PPSD[Transformer_tuple]['Expected_input']/I1_SUB
    else:
        R1_SUB = 0
    if 'Resistance from modified' in Power_dictionary[Transformer_tuple]:
        Power_dictionary[Transformer_tuple]['Resistance from modified'][0] = [R1_SUB,0]
    else:
        Power_dictionary[Transformer_tuple]['Resistance from modified'] = {0:[R1_SUB,0]}
    #print(R1_SUB,'R1_SUB')
    # Translate every parallel cable resistance entry the same way.
    if 'Parallel Resistance from cabeis' in Power_dictionary[Transformer_tuple]:
        Power_dictionary[Transformer_tuple]['Parallel Resistance from cabeis modified'] = {}
        for key, value in Power_dictionary[Transformer_tuple]['Parallel Resistance from cabeis'].items():
            R2_SUB = value[0]
            if R2_SUB:
                # NOTE(review): this divides by the *function argument*
                # `Resistance`, not `R2_SUB`, unlike the block above —
                # confirm whether that is intentional.
                I2_SUB = PPSD[Transformer_tuple]['Expected_output']/Resistance
                V2_SUB = PPSD[Transformer_tuple]['Expected_output']
                V1_SUB = (V2_SUB*Turn_ratio)
                I1_SUB = (V2_SUB/V1_SUB)*I2_SUB
                R1_SUB = PPSD[Transformer_tuple]['Expected_input']/I1_SUB
            else:
                R1_SUB = 0
            Power_dictionary[Transformer_tuple]['Parallel Resistance from cabeis modified'][key] = [R1_SUB,value[1]]
    Power_dictionary[Transformer_tuple]['Resistance'] = Working_out_resistance_Modified(Transformer,Power_dictionary)
    if voltage:
        # Actual operating point: derive the secondary side from the real
        # primary voltage/current.
        V1 = voltage
        I1 = current
        R1 = Working_out_resistance_Modified(Transformer,Power_dictionary)
        R2 = Resistance
        V2 = V1/Turn_ratio
        if PPSD[Transformer_tuple]['Voltage_limiting']:
            if V2 > PPSD[Transformer_tuple]['Voltage_limiting']:
                V2 = PPSD[Transformer_tuple]['Voltage_limited_to']
        # NOTE(review): the ratio-based current below is immediately
        # overwritten by the Ohm's-law value — confirm which is intended.
        I2 = (V2/V1)*I1
        I2 = V2/Resistance
        Power_dictionary[Transformer_tuple]['Supplying voltage'] = V2
        Power_dictionary[Transformer_tuple]['Supply current'] = I2
    if is_working_backwards:
        if not 'Format_for_sub_syston' in Power_dictionary[Transformer_tuple]:
            if Support_supply_formatting:
                # Record this transformer as a sub-system top node.
                if not 'Supply current' in Power_dictionary[Transformer_tuple]:
                    Supply_current = 0
                else:
                    Supply_current = Power_dictionary[Transformer_tuple]['Supply current']
                Power_dictionary[Transformer_tuple]['sub syston TOP'] = [
                    Transformer,
                    Resistance,
                    Supply_current
                    ]
    return([V2,I2,R2])
#(Battery,Power_dictionary,PPSD,is_working_backwards)
def Battery_charge(Battery,PD,PPSD,IWB,voltage = 0):
    """Drive the battery's charging state machine.

    On the backwards pass (IWB true) the received *voltage* decides whether
    the battery stops pulling charge ('Pulling' False), commits the charge
    computed last pass ('If_voltage_charge' -> 'Current_capacity'), and
    ramps the charge rate ('Charging_multiplier') up.  On the forward pass
    the charge amount is (re)computed via Charge_calculations, ramping the
    multiplier down while the battery is still pulling.  Mutates PPSD and
    PD in place; returns None.
    """
    #print('hey1')
    Tuple_Battery = tuple([tuple(Battery[0]),Battery[1]])
    if IWB:
        #print('pop1')
        Receiving_voltage = voltage
        if Receiving_voltage >= PPSD[Tuple_Battery]['Increased_charge_voltage']:
            # Plenty of voltage available: commit charge and speed up.
            #print(PPSD[Tuple_Battery]['If_voltage_charge'])
            PPSD[Tuple_Battery]['Pulling'] = False
            if PPSD[Tuple_Battery]['Charging']:
                #print('pop3')
                PPSD[Tuple_Battery]['Current_capacity'] = PPSD[Tuple_Battery]['If_voltage_charge']
            if not PPSD[Tuple_Battery]['Charging_multiplier'] >= PPSD[Tuple_Battery]['Max_charging_multiplier']:
                # Ramp the charge rate up one step, capped at the maximum.
                #print('adding')
                Charging_multiplier = PPSD[Tuple_Battery]['Charging_multiplier']
                PPSD[Tuple_Battery]['Charging_multiplier'] = Charging_multiplier + PPSD[Tuple_Battery]['Charge_steps']
        elif Receiving_voltage >= PPSD[Tuple_Battery]['Extra_charge_cut_off']:
            # Enough voltage to keep charging at the current rate.
            #print('pop2')
            PPSD[Tuple_Battery]['Pulling'] = False
            if PPSD[Tuple_Battery]['Charging']:
                #print('pop3')
                PPSD[Tuple_Battery]['Current_capacity'] = PPSD[Tuple_Battery]['If_voltage_charge']
        else:
            # Not enough voltage: keep pulling.
            PPSD[Tuple_Battery]['Pulling'] = True
    else:
        #print('heytyy?')
        if PPSD[Tuple_Battery]['Pulling'] == False:
            Charge_calculations(Battery,PD,PPSD,IWB)
        else:
            # Still pulling: ramp the charge rate down one step (not below 0)
            # before recomputing the charge.
            if not 0.0 >= PPSD[Tuple_Battery]['Charging_multiplier']:
                Charging_multiplier = PPSD[Tuple_Battery]['Charging_multiplier']
                PPSD[Tuple_Battery]['Charging_multiplier'] = Charging_multiplier - PPSD[Tuple_Battery]['Charge_steps']
            Charge_calculations(Battery,PD,PPSD,IWB)
def Charge_calculations(Battery,PD,PPSD,IWB):
    """Recompute a battery's charging wattage and equivalent resistance.

    With a zero charging multiplier the battery is simply marked as not
    charging.  Otherwise, while below maximum capacity, the projected
    capacity after one charge step is stored in ``If_voltage_charge``
    (clamped to ``Capacity_max``) and the battery's resistance entry in
    PD is refreshed from the computed charging current.
    """
    battery_key = (tuple(Battery[0]), Battery[1])
    state = PPSD[battery_key]
    if state['Charging_multiplier'] == 0:
        state['Charging'] = False
        return
    state['Charging'] = True
    capacity_now = state['Current_capacity']
    capacity_cap = state['Capacity_max']
    if not capacity_now < capacity_cap:
        # Already full -- nothing further to recompute.
        return
    charging_current = state['Standard_charge_current'] * state['Charging_multiplier']
    charging_watts = state['Supply_voltage'] * charging_current
    projected = capacity_now + charging_watts
    # Clamp the projected capacity to the battery's maximum.
    state['If_voltage_charge'] = capacity_cap if projected >= capacity_cap else projected
    # Refresh the battery's entry in the resistance bookkeeping.
    slot = PD[battery_key]
    entry = [state['Supply_voltage'] / charging_current, 0]
    if 'Resistance from modified' in slot:
        slot['Resistance from modified']['Battery'] = entry
    else:
        slot['Resistance from modified'] = {0: entry}
    slot['Resistance'] = Working_out_resistance_Modified(Battery, PD)
| Necromunger/unitystation | Reference Material/Bod9001_python_Power_System/Power_Functions.py | Python | agpl-3.0 | 24,042 |
# coding: utf-8
"""
ASN.1 type classes for the time stamp protocol (TSP). Exports the following
items:
- TimeStampReq()
- TimeStampResp()
Also adds TimeStampedData() support to asn1crypto.cms.ContentInfo(),
TimeStampedData() and TSTInfo() support to
asn1crypto.cms.EncapsulatedContentInfo() and some oids and value parsers to
asn1crypto.cms.CMSAttribute().
Other type classes are defined that help compose the types listed above.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
from .algos import DigestAlgorithm
from .cms import (
CMSAttribute,
CMSAttributeType,
ContentInfo,
ContentType,
EncapsulatedContentInfo,
)
from .core import (
Any,
BitString,
Boolean,
Choice,
GeneralizedTime,
IA5String,
Integer,
ObjectIdentifier,
OctetString,
Sequence,
SequenceOf,
SetOf,
UTF8String,
)
from .crl import CertificateList
from .x509 import (
Attributes,
CertificatePolicies,
GeneralName,
GeneralNames,
)
# The structures in this file are based on https://tools.ietf.org/html/rfc3161,
# https://tools.ietf.org/html/rfc4998, https://tools.ietf.org/html/rfc5544,
# https://tools.ietf.org/html/rfc5035, https://tools.ietf.org/html/rfc2634
class Version(Integer):
    """Version number used by TSP/CMS structures, mapped to names v0-v5."""
    _map = {
        0: 'v0',
        1: 'v1',
        2: 'v2',
        3: 'v3',
        4: 'v4',
        5: 'v5',
    }
class MessageImprint(Sequence):
    """RFC 3161 MessageImprint: the hash algorithm and digest of the data to time-stamp."""
    _fields = [
        ('hash_algorithm', DigestAlgorithm),
        ('hashed_message', OctetString),
    ]
class Accuracy(Sequence):
    """RFC 3161 Accuracy: time-stamp precision as optional seconds/millis/micros."""
    _fields = [
        ('seconds', Integer, {'optional': True}),
        ('millis', Integer, {'implicit': 0, 'optional': True}),
        ('micros', Integer, {'implicit': 1, 'optional': True}),
    ]
class Extension(Sequence):
    """X.509-style extension: OID, criticality flag and DER-encoded value."""
    _fields = [
        ('extn_id', ObjectIdentifier),
        ('critical', Boolean, {'default': False}),
        ('extn_value', OctetString),
    ]
class Extensions(SequenceOf):
    """SEQUENCE OF Extension."""
    _child_spec = Extension
class TSTInfo(Sequence):
    """RFC 3161 TSTInfo: the signed payload of a time-stamp token."""
    _fields = [
        ('version', Version),
        ('policy', ObjectIdentifier),
        ('message_imprint', MessageImprint),
        ('serial_number', Integer),
        ('gen_time', GeneralizedTime),
        ('accuracy', Accuracy, {'optional': True}),
        ('ordering', Boolean, {'default': False}),
        ('nonce', Integer, {'optional': True}),
        ('tsa', GeneralName, {'explicit': 0, 'optional': True}),
        ('extensions', Extensions, {'implicit': 1, 'optional': True}),
    ]
class TimeStampReq(Sequence):
    """RFC 3161 TimeStampReq: a client's request for a time-stamp token."""
    _fields = [
        ('version', Version),
        ('message_imprint', MessageImprint),
        ('req_policy', ObjectIdentifier, {'optional': True}),
        ('nonce', Integer, {'optional': True}),
        ('cert_req', Boolean, {'default': False}),
        ('extensions', Extensions, {'implicit': 0, 'optional': True}),
    ]
class PKIStatus(Integer):
    """RFC 3161 PKIStatus: overall result code of a time-stamp response."""
    _map = {
        0: 'granted',
        1: 'granted_with_mods',
        2: 'rejection',
        3: 'waiting',
        4: 'revocation_warning',
        5: 'revocation_notification',
    }
class PKIFreeText(SequenceOf):
    """SEQUENCE OF UTF8String: human-readable status messages."""
    _child_spec = UTF8String
class PKIFailureInfo(BitString):
    """RFC 3161 PKIFailureInfo bit string: reasons a request was rejected."""
    _map = {
        0: 'bad_alg',
        2: 'bad_request',
        5: 'bad_data_format',
        14: 'time_not_available',
        15: 'unaccepted_policy',
        16: 'unaccepted_extensions',
        17: 'add_info_not_available',
        25: 'system_failure',
    }
class PKIStatusInfo(Sequence):
    """RFC 3161 PKIStatusInfo: status code plus optional text and failure bits."""
    _fields = [
        ('status', PKIStatus),
        ('status_string', PKIFreeText, {'optional': True}),
        ('fail_info', PKIFailureInfo, {'optional': True}),
    ]
class TimeStampResp(Sequence):
    """RFC 3161 TimeStampResp: the TSA's reply to a TimeStampReq.

    Per the ASN.1 in RFC 3161 section 2.4.2, timeStampToken is OPTIONAL --
    it is absent when the status indicates rejection -- so the field must be
    marked optional or parsing of error responses fails.
    """
    _fields = [
        ('status', PKIStatusInfo),
        ('time_stamp_token', ContentInfo, {'optional': True}),
    ]
class MetaData(Sequence):
    """RFC 5544 MetaData: descriptive information about the time-stamped content."""
    _fields = [
        ('hash_protected', Boolean),
        ('file_name', UTF8String, {'optional': True}),
        ('media_type', IA5String, {'optional': True}),
        ('other_meta_data', Attributes, {'optional': True}),
    ]
class TimeStampAndCRL(Sequence):
    """RFC 5544 TimeStampAndCRL: a time-stamp token plus an optional CRL.

    Must derive from Sequence, not SequenceOf: ``_fields`` is only honored
    by Sequence, and RFC 5544 defines this type as a two-member SEQUENCE
    (SequenceOf would silently ignore the field specs).
    """
    _fields = [
        ('time_stamp', EncapsulatedContentInfo),
        ('crl', CertificateList, {'optional': True}),
    ]
class TimeStampTokenEvidence(SequenceOf):
    """RFC 5544: SEQUENCE OF TimeStampAndCRL."""
    _child_spec = TimeStampAndCRL
class DigestAlgorithms(SequenceOf):
    """SEQUENCE OF DigestAlgorithm (RFC 4998)."""
    _child_spec = DigestAlgorithm
class EncryptionInfo(Sequence):
    """RFC 4998 EncryptionInfo: algorithm OID plus opaque parameter value."""
    _fields = [
        ('encryption_info_type', ObjectIdentifier),
        ('encryption_info_value', Any),
    ]
class PartialHashtree(SequenceOf):
    """RFC 4998 PartialHashtree: one group of hash values from a reduced hash tree."""
    _child_spec = OctetString
class PartialHashtrees(SequenceOf):
    """SEQUENCE OF PartialHashtree (a reduced Merkle hash tree, RFC 4998)."""
    _child_spec = PartialHashtree
class ArchiveTimeStamp(Sequence):
    """RFC 4998 ArchiveTimeStamp: one layer of long-term archive evidence."""
    _fields = [
        ('digest_algorithm', DigestAlgorithm, {'implicit': 0, 'optional': True}),
        ('attributes', Attributes, {'implicit': 1, 'optional': True}),
        ('reduced_hashtree', PartialHashtrees, {'implicit': 2, 'optional': True}),
        ('time_stamp', ContentInfo),
    ]
class ArchiveTimeStampSequence(SequenceOf):
    """SEQUENCE OF ArchiveTimeStamp (RFC 4998)."""
    _child_spec = ArchiveTimeStamp
class EvidenceRecord(Sequence):
    """RFC 4998 EvidenceRecord: long-term non-repudiation evidence for data."""
    _fields = [
        ('version', Version),
        ('digest_algorithms', DigestAlgorithms),
        ('crypto_infos', Attributes, {'implicit': 0, 'optional': True}),
        ('encryption_info', EncryptionInfo, {'implicit': 1, 'optional': True}),
        ('archive_time_stamp_sequence', ArchiveTimeStampSequence),
    ]
class OtherEvidence(Sequence):
    """Extension point for evidence types not defined in RFC 5544: OID + value."""
    _fields = [
        ('oe_type', ObjectIdentifier),
        ('oe_value', Any),
    ]
class Evidence(Choice):
    """RFC 5544 Evidence CHOICE: time-stamp tokens, an evidence record, or other."""
    _alternatives = [
        ('tst_evidence', TimeStampTokenEvidence, {'implicit': 0}),
        ('ers_evidence', EvidenceRecord, {'implicit': 1}),
        ('other_evidence', OtherEvidence, {'implicit': 2}),
    ]
class TimeStampedData(Sequence):
    """RFC 5544 TimeStampedData: content (or a URI to it) plus temporal evidence."""
    _fields = [
        ('version', Version),
        ('data_uri', IA5String, {'optional': True}),
        ('meta_data', MetaData, {'optional': True}),
        ('content', OctetString, {'optional': True}),
        ('temporal_evidence', Evidence),
    ]
class IssuerSerial(Sequence):
    """Issuer names plus certificate serial number (RFC 2634/5035)."""
    _fields = [
        ('issuer', GeneralNames),
        ('serial_number', Integer),
    ]
class ESSCertID(Sequence):
    """RFC 2634 ESSCertID: certificate hash with optional issuer/serial."""
    _fields = [
        ('cert_hash', OctetString),
        ('issuer_serial', IssuerSerial, {'optional': True}),
    ]
class ESSCertIDs(SequenceOf):
    """SEQUENCE OF ESSCertID."""
    _child_spec = ESSCertID
class SigningCertificate(Sequence):
    """RFC 2634 signing-certificate attribute value: cert IDs plus optional policies."""
    _fields = [
        ('certs', ESSCertIDs),
        ('policies', CertificatePolicies, {'optional': True}),
    ]
class SetOfSigningCertificates(SetOf):
    """SET OF SigningCertificate, as stored in a CMSAttribute's values."""
    _child_spec = SigningCertificate
class ESSCertIDv2(Sequence):
    """RFC 5035 ESSCertIDv2: like ESSCertID but with a configurable hash (default SHA-256)."""
    _fields = [
        ('hash_algorithm', DigestAlgorithm, {'default': {'algorithm': 'sha256'}}),
        ('cert_hash', OctetString),
        ('issuer_serial', IssuerSerial, {'optional': True}),
    ]
class ESSCertIDv2s(SequenceOf):
    """SEQUENCE OF ESSCertIDv2."""
    _child_spec = ESSCertIDv2
class SigningCertificateV2(Sequence):
    """RFC 5035 signing-certificate-v2 attribute value."""
    _fields = [
        ('certs', ESSCertIDv2s),
        ('policies', CertificatePolicies, {'optional': True}),
    ]
class SetOfSigningCertificatesV2(SetOf):
    """SET OF SigningCertificateV2, as stored in a CMSAttribute's values."""
    _child_spec = SigningCertificateV2
# Register the TSP content types and CMS attributes defined above with the
# shared CMS dispatch tables, so that ContentInfo / EncapsulatedContentInfo /
# CMSAttribute instances parse these structures automatically by OID.
EncapsulatedContentInfo._oid_specs['tst_info'] = TSTInfo
EncapsulatedContentInfo._oid_specs['timestamped_data'] = TimeStampedData
ContentInfo._oid_specs['timestamped_data'] = TimeStampedData
ContentType._map['1.2.840.113549.1.9.16.1.4'] = 'tst_info'
ContentType._map['1.2.840.113549.1.9.16.1.31'] = 'timestamped_data'
CMSAttributeType._map['1.2.840.113549.1.9.16.2.12'] = 'signing_certificate'
CMSAttribute._oid_specs['signing_certificate'] = SetOfSigningCertificates
CMSAttributeType._map['1.2.840.113549.1.9.16.2.47'] = 'signing_certificate_v2'
CMSAttribute._oid_specs['signing_certificate_v2'] = SetOfSigningCertificatesV2
| ctrlaltdel/neutrinator | vendor/asn1crypto/tsp.py | Python | gpl-3.0 | 7,827 |
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import webnotes
def execute(filters=None):
	"""Build the Project-wise Stock Tracking report.

	Returns (columns, data): the column definitions from get_columns() and
	one row per project combining the project master fields with the
	purchased / issued / delivered item costs keyed by project name.
	"""
	columns = get_columns()
	projects = get_project_details()
	purchased = get_purchased_items_cost()
	issued = get_issued_items_cost()
	delivered = get_delivered_items_cost()
	data = [
		[p.name, purchased.get(p.name, 0), issued.get(p.name, 0),
			delivered.get(p.name, 0), p.project_name, p.status, p.company,
			p.customer, p.project_value, p.project_start_date,
			p.completion_date]
		for p in projects
	]
	return columns, data
def get_columns():
	"""Return report column definitions in "Label:Fieldtype/Options:Width" format."""
	return [
		"Project Id:Link/Project:140",
		"Cost of Purchased Items:Currency:160",
		"Cost of Issued Items:Currency:160",
		"Cost of Delivered Items:Currency:160",
		"Project Name::120",
		"Project Status::120",
		"Company:Link/Company:100",
		"Customer:Link/Customer:140",
		"Project Value:Currency:120",
		"Project Start Date:Date:120",
		"Completion Date:Date:120",
	]
def get_project_details():
	"""Return all non-cancelled Projects (docstatus < 2) as a list of dicts."""
	return webnotes.conn.sql(""" select name, project_name, status, company, customer, project_value,
		project_start_date, completion_date from tabProject where docstatus < 2""", as_dict=1)
def get_purchased_items_cost():
	"""Map project name -> total amount of submitted Purchase Receipt items."""
	pr_items = webnotes.conn.sql("""select project_name, sum(amount) as amount
		from `tabPurchase Receipt Item` where ifnull(project_name, '') != ''
		and docstatus = 1 group by project_name""", as_dict=1)
	pr_item_map = {}
	for item in pr_items:
		# One row per project (GROUP BY), so setdefault just fills the map.
		pr_item_map.setdefault(item.project_name, item.amount)
	return pr_item_map
def get_issued_items_cost():
	"""Map project name -> total amount of items issued via Stock Entries.

	Only submitted entries are considered, and only detail rows with no
	target warehouse (i.e. material issues, not transfers/receipts).
	"""
	se_items = webnotes.conn.sql("""select se.project_name, sum(se_item.amount) as amount
		from `tabStock Entry` se, `tabStock Entry Detail` se_item
		where se.name = se_item.parent and se.docstatus = 1 and ifnull(se_item.t_warehouse, '') = ''
		and ifnull(se.project_name, '') != '' group by se.project_name""", as_dict=1)
	se_item_map = {}
	for item in se_items:
		# One row per project (GROUP BY), so setdefault just fills the map.
		se_item_map.setdefault(item.project_name, item.amount)
	return se_item_map
def get_delivered_items_cost():
	"""Map project name -> total amount of delivered items.

	Combines submitted Delivery Notes with POS Sales Invoices that update
	stock.  NOTE(review): setdefault keeps the Delivery Note amount when a
	project appears in both result sets -- the POS invoice amount is then
	ignored rather than summed; confirm this is the intended behaviour.
	"""
	dn_items = webnotes.conn.sql("""select dn.project_name, sum(dn_item.amount) as amount
		from `tabDelivery Note` dn, `tabDelivery Note Item` dn_item
		where dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project_name, '') != ''
		group by dn.project_name""", as_dict=1)
	si_items = webnotes.conn.sql("""select si.project_name, sum(si_item.amount) as amount
		from `tabSales Invoice` si, `tabSales Invoice Item` si_item
		where si.name = si_item.parent and si.docstatus = 1 and ifnull(si.update_stock, 0) = 1
		and ifnull(si.is_pos, 0) = 1 and ifnull(si.project_name, '') != ''
		group by si.project_name""", as_dict=1)
	dn_item_map = {}
	for item in dn_items:
		dn_item_map.setdefault(item.project_name, item.amount)
	for item in si_items:
		dn_item_map.setdefault(item.project_name, item.amount)
	return dn_item_map
# +--------------------------------------------------------------------------+
# | Licensed Materials - Property of IBM |
# | |
# | (C) Copyright IBM Corporation 2009-2013. |
# +--------------------------------------------------------------------------+
# | This module complies with Django 1.0 and is |
# | Licensed under the Apache License, Version 2.0 (the "License"); |
# | you may not use this file except in compliance with the License. |
# | You may obtain a copy of the License at |
# | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable |
# | law or agreed to in writing, software distributed under the License is |
# | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
# | KIND, either express or implied. See the License for the specific |
# | language governing permissions and limitations under the License. |
# +--------------------------------------------------------------------------+
# | Authors: Ambrish Bhargava, Tarun Pasrija, Rahul Priyadarshi |
# +--------------------------------------------------------------------------+
# Importing IBM_DB wrapper ibm_db_dbi
try:
import ibm_db_dbi as Database
except ImportError as e:
raise ImportError(
"ibm_db module not found. Install ibm_db module from http://code.google.com/p/ibm-db/. Error: %s" % e)
import datetime
# For checking django's version
from django import VERSION as djangoVersion
if djangoVersion[0:2] > (1, 1):
from django.db import utils
import sys
if djangoVersion[0:2] >= (1, 4):
from django.utils import timezone
from django.conf import settings
import warnings
if djangoVersion[0:2] >= (1, 5):
from django.utils.encoding import force_bytes, force_text
from django.utils import six
import re
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
if djangoVersion[0:2] >= (1, 6):
Error = Database.Error
InterfaceError = Database.InterfaceError
DataError = Database.DataError
OperationalError = Database.OperationalError
InternalError = Database.InternalError
ProgrammingError = Database.ProgrammingError
NotSupportedError = Database.NotSupportedError
class DatabaseWrapper(object):
    """Connection-management glue between Django and the ibm_db_dbi driver."""

    # Get new database connection for non persistance connection
    def get_new_connection(self, kwargs):
        """Open a new DB2 connection described by the settings dict *kwargs*.

        Builds a TCP/IP DSN when both host and port are supplied (otherwise
        the database name is treated as a local catalog entry), sets the
        autocommit mode expected by the running Django version, flattens the
        optional 'options' dict, and honours the PCONNECT flag to choose a
        persistent connection.  *kwargs* is mutated in place.
        """
        # Idiomatic membership tests replace the former kwargs.keys() list
        # plus __contains__ calls; behavior is unchanged.
        if 'port' in kwargs and 'host' in kwargs:
            kwargs['dsn'] = "DATABASE=%s;HOSTNAME=%s;PORT=%s;PROTOCOL=TCPIP;" % (
                kwargs.get('database'),
                kwargs.get('host'),
                kwargs.get('port')
            )
        else:
            kwargs['dsn'] = kwargs.get('database')
        # Before Django 1.6 Django managed transactions itself, so autocommit
        # started OFF; from 1.6 on autocommit-on is the expected default.
        if djangoVersion[0:2] >= (1, 6):
            conn_options = {Database.SQL_ATTR_AUTOCOMMIT: Database.SQL_AUTOCOMMIT_ON}
        else:
            conn_options = {Database.SQL_ATTR_AUTOCOMMIT: Database.SQL_AUTOCOMMIT_OFF}
        kwargs['conn_options'] = conn_options
        # Flatten the nested OPTIONS dict into the keyword set.
        if 'options' in kwargs:
            kwargs.update(kwargs.pop('options'))
        # 'port' is already encoded in the DSN; drop it if present.
        kwargs.pop('port', None)
        # PCONNECT selects a persistent connection; default is a plain connect.
        pconnect_flag = kwargs.pop('PCONNECT', False)
        if pconnect_flag:
            connection = Database.pconnect(**kwargs)
        else:
            connection = Database.connect(**kwargs)
        # Expose the DBI set_autocommit method under the name Django expects.
        connection.autocommit = connection.set_autocommit
        return connection

    def is_active(self, connection):
        """Return True if the underlying ibm_db handle is still active."""
        return Database.ibm_db.active(connection.conn_handler)

    # Over-riding _cursor method to return DB2 cursor.
    def _cursor(self, connection):
        """Return a DB2CursorWrapper bound to *connection*."""
        return DB2CursorWrapper(connection)

    def close(self, connection):
        """Close the given connection."""
        connection.close()

    def get_server_version(self, connection):
        """Return the DB2 server version as a tuple of ints, e.g. (10, 5, 0).

        NOTE(review): when *connection* is falsy this calls self.cursor(),
        which must be supplied by a cooperating subclass/mixin -- confirm.
        """
        self.connection = connection
        if not self.connection:
            self.cursor()
        return tuple(int(version) for version in self.connection.server_info()[1].split("."))
class DB2CursorWrapper(Database.Cursor):
    """
    This is the wrapper around IBM_DB_DBI in order to support format parameter style
    IBM_DB_DBI supports qmark, where as Django support format style,
    hence this conversion is required.
    """

    def __init__(self, connection):
        # The underlying DBI cursor needs the raw ibm_db handle plus the
        # DBI connection object.
        super(DB2CursorWrapper, self).__init__(connection.conn_handler, connection)

    def __iter__(self):
        return self

    def __next__(self):
        # Iterator protocol: yield rows until fetchone() is exhausted.
        row = self.fetchone()
        if row is None:
            raise StopIteration
        return row

    def _format_parameters(self, parameters):
        """Normalize query parameters; makes naive datetimes UTC when USE_TZ is on."""
        parameters = list(parameters)
        for index in range(len(parameters)):
            # With raw SQL queries, datetimes can reach this function
            # without being converted by DateTimeField.get_db_prep_value.
            if settings.USE_TZ and isinstance(parameters[index], datetime.datetime):
                param = parameters[index]
                if timezone.is_naive(param):
                    warnings.warn("Received a naive datetime (%s)"
                                  " while time zone support is active." % param,
                                  RuntimeWarning)
                    default_timezone = timezone.get_default_timezone()
                    param = timezone.make_aware(param, default_timezone)
                param = param.astimezone(timezone.utc).replace(tzinfo=None)
                parameters[index] = param
        return tuple(parameters)

    # Over-riding this method to modify SQLs which contains format parameter to qmark.
    def execute(self, operation, parameters=()):
        """Execute one statement, converting %s placeholders to qmark style.

        Driver exceptions are re-raised as the matching django.db.utils
        exception types on Django >= 1.2 (with Django-version-specific
        re-raise mechanics for Python 2 vs 3).
        """
        try:
            if operation.count("db2regexExtraField(%s)") > 0:
                # Regex helper marker: substitute parameters directly and
                # strip the marker before execution.
                operation = operation.replace("db2regexExtraField(%s)", "")
                operation = operation % parameters
                parameters = ()
            if operation.count("%s") > 0:
                # Replace every %s placeholder with the qmark style ibm_db expects.
                operation = operation % ( tuple("?" * operation.count("%s")) )
            if djangoVersion[0:2] >= (1, 4):
                parameters = self._format_parameters(parameters)
            if djangoVersion[0:2] <= (1, 1):
                return super(DB2CursorWrapper, self).execute(operation, parameters)
            else:
                try:
                    return super(DB2CursorWrapper, self).execute(operation, parameters)
                except IntegrityError as e:
                    if djangoVersion[0:2] >= (1, 5):
                        six.reraise(utils.IntegrityError,
                                    utils.IntegrityError(*tuple(six.PY3 and e.args or ( e._message, ))),
                                    sys.exc_info()[2])
                        raise
                    else:
                        raise utils.IntegrityError(utils.IntegrityError(*tuple(e))).with_traceback(sys.exc_info()[2])
                except ProgrammingError as e:
                    if djangoVersion[0:2] >= (1, 5):
                        six.reraise(utils.ProgrammingError,
                                    utils.ProgrammingError(*tuple(six.PY3 and e.args or ( e._message, ))),
                                    sys.exc_info()[2])
                        raise
                    else:
                        raise utils.ProgrammingError(utils.ProgrammingError(*tuple(e))).with_traceback(
                            sys.exc_info()[2])
                except DatabaseError as e:
                    if djangoVersion[0:2] >= (1, 5):
                        six.reraise(utils.DatabaseError,
                                    utils.DatabaseError(*tuple(six.PY3 and e.args or ( e._message, ))),
                                    sys.exc_info()[2])
                        raise
                    else:
                        raise utils.DatabaseError(utils.DatabaseError(*tuple(e))).with_traceback(sys.exc_info()[2])
        except TypeError:
            # Mirrors the driver's historical behavior of returning None on
            # malformed parameter substitution.
            return None

    # Over-riding this method to modify SQLs which contains format parameter to qmark.
    def executemany(self, operation, seq_parameters):
        """Execute the statement for every parameter tuple, with %s -> qmark conversion."""
        try:
            if operation.count("db2regexExtraField(%s)") > 0:
                raise ValueError("Regex not supported in this operation")
            if operation.count("%s") > 0:
                operation = operation % ( tuple("?" * operation.count("%s")) )
            if djangoVersion[0:2] >= (1, 4):
                seq_parameters = [self._format_parameters(parameters) for parameters in seq_parameters]
            if djangoVersion[0:2] <= (1, 1):
                return super(DB2CursorWrapper, self).executemany(operation, seq_parameters)
            else:
                try:
                    return super(DB2CursorWrapper, self).executemany(operation, seq_parameters)
                except IntegrityError as e:
                    if djangoVersion[0:2] >= (1, 5):
                        six.reraise(utils.IntegrityError,
                                    utils.IntegrityError(*tuple(six.PY3 and e.args or ( e._message, ))),
                                    sys.exc_info()[2])
                        raise
                    else:
                        raise utils.IntegrityError(utils.IntegrityError(*tuple(e))).with_traceback(sys.exc_info()[2])
                except DatabaseError as e:
                    if djangoVersion[0:2] >= (1, 5):
                        six.reraise(utils.DatabaseError,
                                    utils.DatabaseError(*tuple(six.PY3 and e.args or ( e._message, ))),
                                    sys.exc_info()[2])
                        raise
                    else:
                        raise utils.DatabaseError(utils.DatabaseError(*tuple(e))).with_traceback(sys.exc_info()[2])
        except ( IndexError, TypeError ):
            return None

    # Over-riding this method to modify result set containing datetime and time zone support is active
    def fetchone(self):
        row = super(DB2CursorWrapper, self).fetchone()
        if row is None:
            return row
        else:
            return self._fix_return_data(row)

    # Over-riding this method to modify result set containing datetime and time zone support is active
    def fetchmany(self, size=0):
        rows = super(DB2CursorWrapper, self).fetchmany(size)
        if rows is None:
            return rows
        else:
            return [self._fix_return_data(row) for row in rows]

    # Over-riding this method to modify result set containing datetime and time zone support is active
    def fetchall(self):
        rows = super(DB2CursorWrapper, self).fetchall()
        if rows is None:
            return rows
        else:
            return [self._fix_return_data(row) for row in rows]

    # This method to modify result set containing datetime and time zone support is active
    def _fix_return_data(self, row):
        """Post-process one fetched row: UTC-tag naive datetimes, strip NULs from strings."""
        row = list(row)
        index = -1
        if djangoVersion[0:2] >= (1, 4):
            for value, desc in zip(row, self.description):
                index += 1
                if desc[1] == Database.DATETIME:
                    if settings.USE_TZ and value is not None and timezone.is_naive(value):
                        value = value.replace(tzinfo=timezone.utc)
                        row[index] = value
                elif djangoVersion[0:2] >= (1, 5 ):
                    if isinstance(value, six.string_types):
                        row[index] = re.sub(r'[\x00]', '', value)
        return tuple(row)
| nutztherookie/ibm_db_django | ibm_db_django/pybase.py | Python | apache-2.0 | 11,913 |
#! /usr/bin/env python
"""
Copyright (C) 2012-2013 Jussi Leinonen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from ..mie_coated import Mie
import sys
#some allowance for rounding errors etc
epsilon = 1e2*sys.float_info.epsilon
def run_tests():
    """Run the Mie test suite with a verbose text runner.

    All tests should pass; failures indicate a problem with the Mie code
    and should be reported to the author.
    """
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(MieTests)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
class MieTests(unittest.TestCase):
    """Regression tests comparing Mie efficiencies and amplitudes to reference values."""

    def test_single_nonmagnetic(self):
        # Homogeneous absorbing sphere: check all efficiency factors and S1/S2.
        mie = Mie(m=complex(1.5,0.5),x=2.5)
        qext_ref = 2.562873497454734
        qsca_ref = 1.0970718190883924
        qabs_ref = 1.4658016783663417
        qb_ref = 0.12358646817981821
        asy_ref = 0.74890597894850719
        qratio_ref = 0.112651210275834
        for (func,ref) in zip(
            (mie.qext, mie.qsca, mie.qabs, mie.qb, mie.asy, mie.qratio),
            (qext_ref,qsca_ref,qabs_ref,qb_ref,asy_ref,qratio_ref)):
            self.assertLess(abs(ref-func())/ref, epsilon)
        S12_ref = (complex(-0.49958438416709694,-0.24032581667666403),
            complex(0.11666852712178288,0.051661382367147853))
        S12 = mie.S12(-0.6)
        self.assertLess(abs(S12_ref[0]-S12[0])/S12_ref[0], epsilon)
        self.assertLess(abs(S12_ref[1]-S12[1])/S12_ref[1], epsilon)

    def test_coated(self):
        # Coated sphere (core m, shell m2, core size x, total size y).
        mie = Mie(m=complex(1.5,0.5),m2=complex(1.2,0.2),x=1.5,y=5.0)
        qext_ref = 2.0765452928100769
        qsca_ref = 0.90777572021757091
        qabs_ref = 1.168769572592506
        qb_ref = 0.022692436240597712
        asy_ref = 0.90560220988567752
        qratio_ref = 0.024997844440209204
        for (func,ref) in zip(
            (mie.qext, mie.qsca, mie.qabs, mie.qb, mie.asy, mie.qratio),
            (qext_ref,qsca_ref,qabs_ref,qb_ref,asy_ref,qratio_ref)):
            self.assertLess(abs(ref-func())/ref, epsilon)
        S12_ref = (complex(0.28677219451960079,-0.063605895700765691),
            complex(-0.32635924647084191,0.12670342074119806))
        S12 = mie.S12(-0.6)
        self.assertLess(abs(S12_ref[0]-S12[0])/S12_ref[0], epsilon)
        self.assertLess(abs(S12_ref[1]-S12[1])/S12_ref[1], epsilon)

    def test_magnetic(self):
        # Magnetic sphere specified via permittivity/permeability instead of m.
        mie = Mie(eps=complex(2.2,0.8),mu=complex(1.6,1.4),x=4.0)
        qext_ref = 2.6665582594291073
        qsca_ref = 1.1255460946883893
        qabs_ref = 1.541012164740718
        qb_ref = 0.0072453174040961301
        asy_ref = 0.89955981937838192
        qratio_ref = 0.0064371574281033928
        for (func,ref) in zip(
            (mie.qext, mie.qsca, mie.qabs, mie.qb, mie.asy, mie.qratio),
            (qext_ref,qsca_ref,qabs_ref,qb_ref,asy_ref,qratio_ref)):
            self.assertLess(abs(ref-func())/ref, epsilon)
        S12_ref = (complex(0.14683196954000932,-0.017479181764394575),
            complex(-0.12475414168001844,0.28120475717321358))
        S12 = mie.S12(-0.6)
        self.assertLess(abs(S12_ref[0]-S12[0])/S12_ref[0], epsilon)
        self.assertLess(abs(S12_ref[1]-S12[1])/S12_ref[1], epsilon)

    def test_keywords(self):
        # Constructor keywords and attribute assignment must be equivalent.
        mie = Mie(m=complex(1.5,0.5),m2=complex(1.2,0.2),x=1.5,y=5.0)
        mie2 = Mie()
        mie2.m = complex(1.5,0.5)
        mie2.m2 = complex(1.2,0.2)
        mie2.x = 1.5
        mie2.y = 5.0
        for (func1,func2) in zip(
            (mie.qext, mie.qsca, mie.qabs, mie.qb, mie.asy, mie.qratio),
            (mie2.qext, mie2.qsca, mie2.qabs, mie2.qb, mie2.asy,
            mie2.qratio)):
            self.assertEqual(func1(),func2())

    def test_errors(self):
        # Invalid argument combinations must raise ValueError.
        mie = Mie()
        #test that negative values of x fail
        def test_x():
            mie.x = -1.0
        self.assertRaises(ValueError, test_x)
        mie.x = 1.0
        #test that a missing m fails
        self.assertRaises(ValueError, mie.qext)
        mie.m = complex(1.5,0.5)
        #test that y<x fails (y==x is permitted)
        def test_y():
            mie.y = 0.5
        self.assertRaises(ValueError, test_y)
        mie.y = 1.5
        #test that setting y without m2 fails
        self.assertRaises(ValueError, mie.qext)
        mie.m2 = complex(1.2,0.5)
        #test that invalid values of u fail
        self.assertRaises(ValueError, mie.S12, -1.5)
        mie.mu = complex(1.5,0.6)
        #test that multilayered particles with mu fail
        self.assertRaises(ValueError, mie.qext)
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| samatwood/csumuriaopt | pymiecoated/test/test_mie.py | Python | mit | 5,574 |
import math
import argparse
from datetime import datetime
import numpy as np
from sklearn import cross_validation
from sklearn import linear_model
from sklearn import metrics
def load_data(path, **kwargs):
    """Load a text data file via numpy.loadtxt, forwarding all keyword options."""
    loaded = np.loadtxt(path, **kwargs)
    return loaded
def save_data(path, data, **kwargs):
    """Write *data* to a text file via numpy.savetxt, forwarding all keyword options."""
    np.savetxt(path, data, **kwargs)
def hour_from_dt_string(dt_string):
    """Parse a 'YYYY-MM-DD HH:MM:SS' timestamp and return its hour as an int."""
    parsed = datetime.strptime(dt_string, '%Y-%m-%d %H:%M:%S')
    return parsed.hour
def preprocessing(X, y):
    """Select feature columns and append polynomial terms of column 0.

    NOTE(review): ``is_seasons`` (one-hot season indicators from column 1)
    is computed and printed but never added to the returned matrix --
    apparently leftover debug/experimental code; confirm before removing.
    """
    is_seasons = np.empty((X.shape[0], 4))
    for i in xrange(1, 5):
        is_seasons[:,i-1] = X[:,1] == i
    print is_seasons
    # Keep columns [0,2,3,6,7,8] and append squared and cubed copies of column 0.
    X = np.hstack((X[:,[0,2,3,6,7,8]], X[:,0,None]**2, X[:,0,None]**3))
    return X, y
def cv(estimator, X, y):
    """10-fold cross-validation printing per-fold and average RMSLE.

    NOTE(review): the fold count is derived from the module-level
    ``train_dataset`` global rather than ``len(X)`` -- these coincide in
    this script, but the function is not self-contained; verify before reuse.
    """
    k_fold = cross_validation.KFold(n=len(train_dataset), n_folds=10,
                                    indices=True)
    a = 0.0
    for train_idx, test_idx in k_fold:
        r = estimator.fit(X[train_idx], y[train_idx]).predict(X[test_idx])
        # Clamp non-positive predictions so the log() below is defined.
        r = np.where(r > 0, r, 0.01)
        s = math.sqrt(metrics.mean_squared_error(np.log(y[test_idx] + 1),
                                                 np.log(r + 1.0)))
        a += s
        print 'Score: {:.4f}'.format(s)
        print 'Weights: {}'.format(estimator.coef_)
    print 'Average score: {:.4f}'.format(a/len(k_fold))
def loss_func(y_real, y_predicted):
    """Root mean squared logarithmic error (RMSLE) between two arrays."""
    log_real = np.log(y_real + 1)
    log_pred = np.log(y_predicted + 1)
    return math.sqrt(metrics.mean_squared_error(log_real, log_pred))
if __name__ == '__main__':
    # Command arguments
    parser = argparse.ArgumentParser(description='bike-sharing estimator')
    parser.add_argument('--cv', dest='cv', action='store_const', const=True,
                        default=False, help='Do cross validation')
    parser.add_argument('--no-test', dest='out', action='store_const',
                        const=False, default=True, help='No test dataset')
    args = parser.parse_args()
    # Input: column 0 holds the datetime string; convert it to the hour on load.
    common_input_options = {'delimiter': ',', 'skiprows': 1,
                            'converters': {0: hour_from_dt_string} }
    train_dataset = load_data('data/train.csv', usecols=(0,1,2,3,4,5,6,7,8,11),
                              **common_input_options)
    test_dataset = load_data('data/test.csv', usecols=(0,1,2,3,4,5,6,7,8),
                             **common_input_options)
    # Re-read the raw datetime column (no converter) for the output file.
    common_input_options['converters'] = {}
    out_column = load_data('data/test.csv', usecols=(0,), dtype=str,
                           **common_input_options)
    # Data preprocessing: last train column is the target count.
    X_train, y_train = preprocessing(train_dataset[:,:-1], train_dataset[:,-1])
    X_test, y_test = preprocessing(test_dataset, None)
    # The interesting part
    estimator = linear_model.Ridge(copy_X=True, alpha=0.1, fit_intercept=True, normalize=True)
    if args.cv:
        cv(estimator, X_train, y_train)
    if args.out:
        results = estimator.fit(X_train, y_train).predict(X_test)
        # Clamp non-positive predictions, then truncate to integer counts.
        results = np.where(results > 0, results, 0.01).astype(np.int)
        # Output
        save_data('data/out.csv', np.column_stack((out_column.T, results.T)),
                  delimiter=',', header='datetime,count', fmt=('%s', '%s'),
                  comments='')
| LeartS/kaggle-competitions | bike-sharing/0-linear-regression.py | Python | mit | 3,125 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright 2006 - 2012 Philipp Wollermann
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import sys
import os
import os.path
from os import chmod, chown, rename
from stat import S_IMODE, ST_MODE
from datetime import datetime
from time import mktime
from subprocess import Popen, PIPE
from shutil import copy2
import psycopg2
import psycopg2.extras
import csv
import re
def ensure_permissions(directory, perms):
    """Create *directory* if missing and force its permission bits to *perms*.

    *perms* is an octal mode (e.g. 0755).  A warning is printed whenever a
    correction was necessary.
    """
    if not os.path.exists(directory):
        print "WARNING: directory did not exist: %s" % (directory,)
        os.makedirs(directory, perms)
    # makedirs applies the process umask, so re-check the effective mode.
    statinfo = os.stat(directory)
    if not S_IMODE(statinfo[ST_MODE]) == perms:
        print "WARNING: directory had wrong permissions: %s" % (directory,)
        os.chmod(directory, perms)
def ensure_uid_gid(directory, uid, gid):
    """Force *directory* ownership to *uid*/*gid*; -1 leaves that part unchanged.

    Prints a warning and chowns only the mismatching component, mirroring
    os.chown's convention that -1 means "do not change".
    """
    statinfo = os.stat(directory)
    if uid != -1:
        if not statinfo.st_uid == uid:
            print "WARNING: directory had wrong owner: %i != %i" % (uid, statinfo.st_uid)
            os.chown(directory, uid, -1)
    if gid != -1:
        if not statinfo.st_gid == gid:
            print "WARNING: directory had wrong group: %i != %i" % (gid, statinfo.st_gid)
            os.chown(directory, -1, gid)
def main(argv=None):
    """Regenerate /etc/passwd, /etc/group, /etc/shadow and the vsftpd chroot
    list from the pysk PostgreSQL database, then fix the permissions of every
    managed (non-FTP-only) user's home directory.

    Managed accounts occupy the uid/gid range [10000, 20000); entries of the
    existing system files outside that range are copied through unchanged.

    NOTE(review): chmod, chown, rename, copy2, Popen, PIPE, mktime and
    datetime are presumably imported earlier in this file (os / shutil /
    subprocess / time / datetime) -- not visible in this chunk; confirm.
    """
    if argv is None:
        argv = sys.argv
    DATABASE_PASSWORD = 'XXXXXXXXXXX'
    db = psycopg2.connect("host='localhost' user='pysk' password='%s' dbname='pysk'" % (DATABASE_PASSWORD,))
    cursor = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
    # /etc/passwd
    # Shell users (uid = db id + 9999) plus FTP-only users, which reuse the
    # owning user's uid/gid under a "<user>-<suffix>" login name.
    query = "SELECT u.username as plainusername, u.username, u.password, u.id + 9999 AS uid, u.id + 9999 AS gid, 'igowo user' AS gecos, '/home/' || u.username AS home, '/bin/bash' AS shell, 'false' AS ftponly FROM auth_user u WHERE u.password LIKE 'crypt%'"
    query = query + " UNION SELECT u.username as plainusername, u.username || '-' || fu.suffix, fu.password, u.id + 9999 AS uid, u.id + 9999 AS gid, 'igowo ftp user' AS gecos, fu.home, '/usr/local/bin/ftponly' AS shell, 'true' AS ftponly FROM vps_ftpuser fu, auth_user u WHERE fu.owner_id = u.id AND u.password LIKE 'crypt%' ORDER BY username"
    cursor.execute(query)
    users = cursor.fetchall()
    # Check if all passwords are encrypted correctly
    for u in users:
        assert(u["password"].startswith("crypt$$1$"))
        assert(not u["username"].startswith("philwo-"))
        assert(not u["username"].startswith("pysk-"))
        # Resolve symlinks so the containment check below cannot be fooled.
        home = os.path.realpath(u["home"]) + "/"
        print home
        # Every home must live below /home/<plainusername>/.
        assert(re.match(r"^/home/%s/[\w\d\-_./ ]*$" % (u["plainusername"],), home))
    #users_by_uid = dict([(x["uid"], x) for x in users])
    users_by_username = dict([(x["username"], x) for x in users])
    passwd_csv = csv.reader(open("/etc/passwd", "rb"), delimiter=":", quoting=csv.QUOTE_NONE)
    group_csv = csv.reader(open("/etc/group", "rb"), delimiter=":", quoting=csv.QUOTE_NONE)
    shadow_csv = csv.reader(open("/etc/shadow", "rb"), delimiter=":", quoting=csv.QUOTE_NONE)
    passwd_new_file = open("/etc/passwd.new", "w+b")
    group_new_file = open("/etc/group.new", "w+b")
    shadow_new_file = open("/etc/shadow.new", "w+b")
    chroot_users = open("/etc/vsftpd.chroot_list.new", "wb")
    passwd_new = csv.writer(passwd_new_file, delimiter=":", quoting=csv.QUOTE_NONE, lineterminator="\n")
    group_new = csv.writer(group_new_file, delimiter=":", quoting=csv.QUOTE_NONE, lineterminator="\n")
    shadow_new = csv.writer(shadow_new_file, delimiter=":", quoting=csv.QUOTE_NONE, lineterminator="\n")
    # Lock down modes and ownership before any sensitive content is written.
    chmod("/etc/passwd.new", 0644)
    chmod("/etc/group.new", 0644)
    chmod("/etc/shadow.new", 0640)
    chmod("/etc/vsftpd.chroot_list.new", 0644)
    chown("/etc/passwd.new", 0, 0)
    chown("/etc/group.new", 0, 0)
    chown("/etc/shadow.new", 0, 0)
    chown("/etc/vsftpd.chroot_list.new", 0, 0)
    # Read old passwd/group/shadow
    for row in passwd_csv:
        uid = int(row[2])
        if uid < 10000 or uid >= 20000:
            # User is an un-managed user, just copy it
            passwd_new.writerow(row)
    for row in group_csv:
        gid = int(row[2])
        # gid 100 ("users") is regenerated below, so it is skipped here too.
        if gid != 100 and (gid < 10000 or gid >= 20000):
            # User is an un-managed user, just copy it
            group_new.writerow(row)
    for row in shadow_csv:
        username = row[0]
        if not username in users_by_username:
            # User is an un-managed user, just copy it
            shadow_new.writerow(row)
    # Add our managed users from pysk
    # passwd.new
    for user_row in users:
        # Insert fake password, necessary for /etc/passwd
        username = user_row["username"]
        fakepasswd = "x" # not user_row[1] !
        uid = user_row["uid"]
        gid = user_row["gid"]
        gecos = user_row["gecos"]
        home = os.path.realpath(user_row["home"])
        shell = user_row["shell"]
        passwd_new.writerow((username, fakepasswd, uid, gid, gecos, home, shell))
    # group.new
    userlist = []
    for user_row in users:
        # FTP-only logins get no group of their own and are not members
        # of "users".
        if user_row["ftponly"] == "true":
            continue
        groupname = user_row["username"]
        fakepasswd = "x"
        gid = user_row["gid"]
        members = ""
        userlist.append(groupname)
        group_new.writerow((groupname, fakepasswd, gid, members))
    group_new.writerow(("users", "x", "100", ",".join(userlist)))
    # shadow.new
    for user_row in users:
        username = user_row["username"]
        # [6:] strips the "crypt$" prefix (passwords are asserted above to
        # start with "crypt$$1$"), leaving the raw "$1$..." crypt hash.
        password = user_row["password"][6:]
        days_since_1970 = int(mktime(datetime.now().timetuple()) / 86400)
        days_before_change_allowed = 0
        days_after_change_necessesary = 99999
        days_before_expire = 7
        shadow_new.writerow((username, password, days_since_1970, days_before_change_allowed, days_after_change_necessesary, days_before_expire, "", "", ""))
    for user_row in users:
        if user_row["ftponly"] == "true":
            chroot_users.write("%s\n" % (user_row["username"],))
    # Finish up
    passwd_new_file.close()
    group_new_file.close()
    shadow_new_file.close()
    chroot_users.close()
    # Keep a .old backup of each file, then swap in the new ones via rename().
    copy2("/etc/passwd", "/etc/passwd.old")
    copy2("/etc/group", "/etc/group.old")
    copy2("/etc/shadow", "/etc/shadow.old")
    if os.path.isfile("/etc/vsftpd.chroot_list"):
        copy2("/etc/vsftpd.chroot_list", "/etc/vsftpd.chroot_list.old")
    rename("/etc/passwd.new", "/etc/passwd")
    rename("/etc/group.new", "/etc/group")
    rename("/etc/shadow.new", "/etc/shadow")
    rename("/etc/vsftpd.chroot_list.new", "/etc/vsftpd.chroot_list")
    # Sanity-check the result with pwck/grpck and show what changed.
    print Popen(["/usr/sbin/pwck", "-s"], stdout=PIPE).communicate()[0]
    print Popen(["/usr/sbin/grpck", "-s"], stdout=PIPE).communicate()[0]
    print Popen(["diff", "-u", "/etc/passwd.old", "/etc/passwd"], stdout=PIPE).communicate()[0]
    print Popen(["diff", "-u", "/etc/group.old", "/etc/group"], stdout=PIPE).communicate()[0]
    print Popen(["diff", "-u", "/etc/shadow.old", "/etc/shadow"], stdout=PIPE).communicate()[0]
    if os.path.isfile("/etc/vsftpd.chroot_list.old"):
        print Popen(["diff", "-u", "/etc/vsftpd.chroot_list.old", "/etc/vsftpd.chroot_list"], stdout=PIPE).communicate()[0]
    for user_row in users:
        if user_row["ftponly"] == "true":
            continue
        user = user_row["username"]
        uid = user_row["uid"]
        home = os.path.realpath(user_row["home"])
        gid = 100 # "users" group
        print "Fixing permissions for user %s ..." % (user,)
        # /home/username
        ensure_permissions(home, 0755)
        ensure_uid_gid(home, uid, gid)
        # /home/username/www
        ensure_permissions(os.path.join(home, "www"), 0755)
        ensure_uid_gid(os.path.join(home, "www"), uid, gid)
| philwo/pysk | tools/passwd/passwd.py | Python | apache-2.0 | 8,297 |
import pickle
import traceback
from http.server import SimpleHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from ray.rllib.utils.annotations import PublicAPI
from ray.rllib.utils.policy_client import PolicyClient
@PublicAPI
class PolicyServer(ThreadingMixIn, HTTPServer):
    """REST server that can be launched from an ExternalEnv.

    This launches a multi-threaded server that listens on the specified host
    and port to serve policy requests and forward experiences to RLlib.

    Examples:
        >>> class CartpoleServing(ExternalEnv):
               def __init__(self):
                   ExternalEnv.__init__(
                       self, spaces.Discrete(2),
                       spaces.Box(
                           low=-10,
                           high=10,
                           shape=(4,),
                           dtype=np.float32))
               def run(self):
                   server = PolicyServer(self, "localhost", 8900)
                   server.serve_forever()
        >>> register_env("srv", lambda _: CartpoleServing())
        >>> pg = PGTrainer(env="srv", config={"num_workers": 0})
        >>> while True:
                pg.train()

        >>> client = PolicyClient("localhost:8900")
        >>> eps_id = client.start_episode()
        >>> action = client.get_action(eps_id, obs)
        >>> ...
        >>> client.log_returns(eps_id, reward)
        >>> ...
        >>> client.log_returns(eps_id, reward)
    """

    @PublicAPI
    def __init__(self, external_env, address, port):
        # ThreadingMixIn makes the server handle each request in its own
        # thread, so multiple clients can be served concurrently.
        handler = _make_handler(external_env)
        HTTPServer.__init__(self, (address, port), handler)
def _make_handler(external_env):
class Handler(SimpleHTTPRequestHandler):
def do_POST(self):
content_len = int(self.headers.get("Content-Length"), 0)
raw_body = self.rfile.read(content_len)
parsed_input = pickle.loads(raw_body)
try:
response = self.execute_command(parsed_input)
self.send_response(200)
self.end_headers()
self.wfile.write(pickle.dumps(response))
except Exception:
self.send_error(500, traceback.format_exc())
def execute_command(self, args):
command = args["command"]
response = {}
if command == PolicyClient.START_EPISODE:
response["episode_id"] = external_env.start_episode(
args["episode_id"], args["training_enabled"])
elif command == PolicyClient.GET_ACTION:
response["action"] = external_env.get_action(
args["episode_id"], args["observation"])
elif command == PolicyClient.LOG_ACTION:
external_env.log_action(args["episode_id"],
args["observation"], args["action"])
elif command == PolicyClient.LOG_RETURNS:
external_env.log_returns(args["episode_id"], args["reward"],
args["info"])
elif command == PolicyClient.END_EPISODE:
external_env.end_episode(args["episode_id"],
args["observation"])
else:
raise Exception("Unknown command: {}".format(command))
return response
return Handler
| stephanie-wang/ray | rllib/utils/policy_server.py | Python | apache-2.0 | 3,404 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Order BugStatusHistory and Comment newest-first, and add nullable
    status_before/status_after one-to-one links from Comment to BugStatus."""

    dependencies = [
        ('bug_tracker', '0005_comment'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='bugstatushistory',
            options={'ordering': ['-date_created']},
        ),
        migrations.AlterModelOptions(
            name='comment',
            options={'ordering': ['-date_created']},
        ),
        migrations.AddField(
            model_name='comment',
            name='status_after',
            field=models.OneToOneField(related_name='+', null=True, to='bug_tracker.BugStatus'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='comment',
            name='status_before',
            # related_name='+' disables the reverse accessor on BugStatus.
            field=models.OneToOneField(related_name='+', null=True, to='bug_tracker.BugStatus'),
            preserve_default=True,
        ),
    ]
| danux/fractum | bug_tracker/migrations/0006_auto_20150301_0327.py | Python | mit | 990 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
# Luis Rodriguez <luis.rodriguez@opendeusto.es>
#
import sys
from weblab.admin.cli.controller import DbConfiguration
from weblab.admin.script.utils import run_with_config
from weblab.db.upgrade import DbUpgrader
#########################################################################################
#
#
#
# W E B L A B U P G R A D E
#
#
#
def weblab_upgrade(directory):
    """Upgrade the WebLab database deployment found in *directory*.

    Loads the deployment's configuration files, builds the regular and
    coordinator database URLs, and -- if the schema is outdated -- asks the
    operator for confirmation before running the upgrade.
    """
    def on_dir(directory, configuration_files):
        # Invoked by run_with_config with the deployment's config files.
        db_conf = DbConfiguration(configuration_files)
        regular_url = db_conf.build_url()
        coord_url = db_conf.build_coord_url()
        upgrader = DbUpgrader(regular_url, coord_url)
        if not upgrader.check_updated():
            print "The system is outdated. Please, make a backup of the current deployment (copy the directory and make a backup of the database)."
            if raw_input("Do you want to continue with the upgrade? (y/n)") == 'y':
                print "Upgrading database."
                sys.stdout.flush()
                upgrader.upgrade()
                print "Upgrade completed."
            else:
                print "Upgrade aborted."
    run_with_config(directory, on_dir)
| ganeshgore/myremolab | server/src/weblab/admin/script/upgrade.py | Python | bsd-2-clause | 1,556 |
import pygame
import sys
# RGB colours used by the map-editor canvas.
OBSTACLE_COLOR = (255, 0, 0)        # red: obstacle polygons
BACKGROUND_COLOR = (200, 200, 200)  # light grey canvas fill
GOAL_COLOR = (255, 0, 255)          # magenta: goal marker ring
START_COLOR = (0, 0, 255)           # blue: start marker ring
ROBOT_COLOR = (0, 0, 255)           # blue: robot polygon
def draw_circle(screen, point, is_goal):
    """Draw a 30px marker ring at *point*: goal colour when *is_goal*,
    start colour otherwise."""
    color = GOAL_COLOR if is_goal else START_COLOR
    pygame.draw.circle(screen, color, point, 30, 3)
def draw_polygon(screen, point_list, color, currently_working):
    """Render *point_list* according to how many points it holds: nothing,
    a dot, a segment, an open outline (while still being edited), or a
    filled polygon (once complete)."""
    count = len(point_list)
    if count == 0:
        return
    if count == 1:
        pygame.draw.circle(screen, color, point_list[0], 3)
    elif count == 2:
        pygame.draw.line(screen, color, point_list[0], point_list[1], 3)
    elif currently_working:
        # Shape in progress: draw an open polyline instead of a closed fill.
        pygame.draw.lines(screen, color, False, point_list, 3)
    else:
        pygame.draw.polygon(screen, color, point_list)
def start(width, height, filename):
    """Run the interactive map editor.

    Mouse clicks add points to the shape being edited; holding 's' or 'g'
    while clicking places the start/goal marker instead. Keys: 'e' closes
    the current obstacle, 'o'/'r' switch between obstacle and robot editing,
    'w' writes the map to *filename*; closing the window writes and exits.
    """
    pygame.display.init()
    screen = pygame.display.set_mode((width, height))
    screen.fill(BACKGROUND_COLOR)
    done = False
    obstacle_list = list()
    current_obstacle = list()
    robot = list()
    start_point = [0, 0]
    end_point = [width, height]
    editing_obstacles = True
    while not done:
        # Redraw the entire scene every frame.
        screen.fill(BACKGROUND_COLOR)
        for obstacle in obstacle_list:
            draw_polygon(screen, obstacle, OBSTACLE_COLOR, False)
        draw_circle(screen, start_point, False)
        draw_circle(screen, end_point, True)
        # The shape currently being edited is drawn as an open outline.
        draw_polygon(screen, current_obstacle, OBSTACLE_COLOR,
                     editing_obstacles)
        draw_polygon(screen, robot, ROBOT_COLOR, not editing_obstacles)
        event_list = pygame.event.get()
        for event in event_list:
            if event.type == pygame.MOUSEBUTTONUP:
                pos = pygame.mouse.get_pos()
                if pygame.key.get_pressed()[pygame.K_s]:
                    start_point = pos
                elif pygame.key.get_pressed()[pygame.K_g]:
                    end_point = pos
                elif not editing_obstacles:
                    robot.append(pos)
                elif editing_obstacles:
                    current_obstacle.append(pos)
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_e:
                    # Finish the current obstacle and start a new one.
                    obstacle_list.append(current_obstacle)
                    current_obstacle = list()
                elif event.key == pygame.K_w:
                    pygame.display.set_caption("Writing to " + filename)
                    to_cpp(
                        width, height,
                        obstacle_list,
                        robot,
                        start_point,
                        end_point,
                        filename
                    )
                elif event.key == pygame.K_o:
                    editing_obstacles = True
                elif event.key == pygame.K_r:
                    editing_obstacles = False
            elif event.type == pygame.QUIT:
                # Save on exit, then terminate the process.
                to_cpp(
                    width, height,
                    obstacle_list,
                    robot,
                    start_point,
                    end_point,
                    filename)
                exit()
        pygame.display.flip()
def to_cpp(width, height, obstacle_list, robot, start_pos, goal_pos, filename):
    """Write a C++ skeleton for the map to *filename*.

    NOTE: only a fixed stub is emitted; the map arguments are accepted but
    currently unused.
    """
    stub = [
        "#include <vector>\n",
        "using namespace std;\n",
        "class ObstaclesInstance {};\n",
    ]
    with open(filename, "w") as out:
        out.writelines(stub)
if __name__ == "__main__":
    # Usage: map_generator.py <width> <height> <output-file>
    if len(sys.argv) == 4:
        start(int(sys.argv[1]), int(sys.argv[2]), sys.argv[3])
    else:
        raise Exception("Not enough command line arguments given")
| wallarelvo/Dodger | scripts/map_generator.py | Python | apache-2.0 | 3,668 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Tests for DNNSampledSoftmaxClassifier estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import dnn_sampled_softmax_classifier
from tensorflow.python.ops import math_ops
class DNNSampledSoftmaxClassifierTest(tf.test.TestCase):
  # End-to-end tests for the sampled-softmax DNN classifier estimator:
  # training, evaluation, prediction, save/load, export, and custom
  # optimizers/metrics. Uses the legacy tf.contrib.learn/layers APIs.

  def testMultiClass(self):
    """Tests the following.

    1. Tests fit() and evaluate() calls.
    2. Tests the use of a non default optimizer.
    3. Tests the output of get_variable_names().

    Note that the training output is not verified because it is flaky with the
    Iris dataset.
    """
    def _iris_input_fn():
      iris = tf.contrib.learn.datasets.load_iris()
      return {
          'feature': tf.constant(iris.data, dtype=tf.float32)
      }, tf.constant(iris.target, shape=[150, 1], dtype=tf.int64)
    cont_features = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=1,
        n_labels=1,
        feature_columns=cont_features,
        hidden_units=[3, 3])
    classifier.fit(input_fn=_iris_input_fn, steps=5)
    classifier.evaluate(input_fn=_iris_input_fn, steps=1)
    var_names = classifier.get_variable_names()
    self.assertGreater(len(var_names), 6)

  def testTrainWithPartitionedVariables(self):
    """Tests the following.

    1. Tests training with partitioned variables.
    2. Test that the model actually trains.
    3. Tests the output of evaluate() and predict().
    """
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    # The given hash_bucket_size results in variables larger than the
    # default min_slice_size attribute, so the variables are partitioned.
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=2e7)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4],
        # Because we did not start a distributed cluster, we need to pass an
        # empty ClusterSpec, otherwise the device_setter will look for
        # distributed jobs, such as "/job:ps" which are not present.
        config=tf.contrib.learn.RunConfig(
            num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
            tf_random_seed=5))
    # Test that the model actually trains.
    classifier.fit(input_fn=_input_fn, steps=50)
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(evaluate_output['precision_at_1'], 0.9)
    self.assertGreater(evaluate_output['recall_at_1'], 0.9)
    # Test the output of predict()
    predict_output = classifier.predict(input_fn=_input_fn)
    self.assertListEqual([1, 0, 0], list(predict_output))

  def testTrainSaveLoad(self):
    """Tests that ensure that you can save and reload a trained model."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=10)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    model_dir = tempfile.mkdtemp()
    classifier1 = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        model_dir=model_dir,
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4])
    classifier1.fit(input_fn=_input_fn, steps=1)
    predict_output1 = classifier1.predict(input_fn=_input_fn)
    del classifier1
    # A second estimator pointed at the same model_dir must restore the
    # checkpoint and produce identical predictions.
    classifier2 = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        model_dir=model_dir,
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4])
    predict_output2 = classifier2.predict(input_fn=_input_fn)
    self.assertEqual(list(predict_output1), list(predict_output2))

  def testCustomOptimizerByObject(self):
    """Tests the use of custom optimizer."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4],
        optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
        config=tf.contrib.learn.RunConfig(tf_random_seed=5))
    # Test that the model actually trains.
    classifier.fit(input_fn=_input_fn, steps=50)
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(evaluate_output['precision_at_1'], 0.9)
    self.assertGreater(evaluate_output['recall_at_1'], 0.9)
    # Test the output of predict()
    predict_output = classifier.predict(input_fn=_input_fn)
    self.assertListEqual([1, 0, 0], list(predict_output))

  def testCustomOptimizerByFunction(self):
    """Tests the use of custom optimizer."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    def _optimizer_exp_decay():
      # Optimizer factory: called at graph-build time so the decayed
      # learning rate is tied to the estimator's global step.
      global_step = tf.contrib.framework.get_global_step()
      learning_rate = tf.train.exponential_decay(learning_rate=0.01,
                                                 global_step=global_step,
                                                 decay_steps=100,
                                                 decay_rate=0.001)
      return tf.train.AdagradOptimizer(learning_rate=learning_rate)
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4],
        optimizer=_optimizer_exp_decay,
        config=tf.contrib.learn.RunConfig(tf_random_seed=5))
    # Test that the model actually trains.
    classifier.fit(input_fn=_input_fn, steps=50)
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(evaluate_output['precision_at_1'], 0.6)
    self.assertGreater(evaluate_output['recall_at_1'], 0.6)

  def testExport(self):
    """Tests that export model for servo works."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=100)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4])
    export_dir = tempfile.mkdtemp()
    classifier.fit(input_fn=_input_fn, steps=50)
    classifier.export(export_dir)

  def testPredictAsIterable(self):
    """Tests predict() and predict_proba() call with as_iterable set to True."""
    def _input_fn(num_epochs=None):
      features = {
          'age': tf.train.limit_epochs(tf.constant([[.9], [.1], [.1]]),
                                       num_epochs=num_epochs),
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=feature_columns,
        hidden_units=[4, 4])
    classifier.fit(input_fn=_input_fn, steps=1)
    # Limit to one epoch so the iterable predictions terminate.
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    # Test the output of predict() and predict_proba() with as_iterable=True
    predictions = list(
        classifier.predict(input_fn=predict_input_fn, as_iterable=True))
    predictions_proba = list(
        classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
    self.assertTrue(np.array_equal(predictions,
                                   np.argmax(predictions_proba, 1)))

  def testCustomMetrics(self):
    """Tests the use of custom metric."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    def _my_metric_op(predictions, targets):
      """Simply multiplies predictions and targets to return [1, 0 , 0]."""
      prediction_classes = math_ops.argmax(predictions, 1)
      return tf.mul(prediction_classes, tf.reshape(targets, [-1]))
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4],
        optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
        config=tf.contrib.learn.RunConfig(tf_random_seed=5))
    # Test that the model actually trains.
    classifier.fit(input_fn=_input_fn, steps=50)
    # The metric key maps ('metric name', 'prediction key') to the op.
    metrics = {('my_metric', 'probabilities'): _my_metric_op}
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1,
                                          metrics=metrics)
    self.assertListEqual([1, 0, 0], list(evaluate_output['my_metric']))

  def testMultiLabelTopKWithCustomMetrics(self):
    """Tests the cases where n_labels>1 top_k>1 and custom metrics on top_k."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[0, 1], [0, 1], [0, 1]], dtype=tf.int64)
      return features, target
    def _my_metric_op(predictions, targets):
      """Simply adds the predictions and targets."""
      return tf.add(math_ops.to_int64(predictions), targets)
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=2,
        top_k=2,
        feature_columns=embedding_features,
        hidden_units=[4, 4],
        optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
        config=tf.contrib.learn.RunConfig(tf_random_seed=5))
    classifier.fit(input_fn=_input_fn, steps=50)
    # evaluate() without custom metrics.
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(evaluate_output['precision_at_1'], 0.4)
    self.assertGreater(evaluate_output['recall_at_1'], 0.4)
    self.assertGreater(evaluate_output['precision_at_2'], 0.4)
    self.assertGreater(evaluate_output['recall_at_2'], 0.4)
    # evaluate() with custom metrics.
    metrics = {('my_metric', 'top_k'): _my_metric_op}
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1,
                                          metrics=metrics)
    # This test's output is flaky so just testing that 'my_metric' is indeed
    # part of the evaluate_output.
    self.assertTrue('my_metric' in evaluate_output)
    # predict() with top_k.
    predict_output = classifier.predict(input_fn=_input_fn, get_top_k=True)
    self.assertListEqual([3, 2], list(predict_output.shape))
    # TODO(dnivara): Setup this test such that it is not flaky and predict() and
    # evaluate() outputs can be tested.
if __name__ == '__main__':
  # Run all test cases in this module.
  tf.test.main()
| naturali/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_sampled_softmax_classifier_test.py | Python | apache-2.0 | 15,025 |
from __future__ import unicode_literals
import datetime
import os
from decimal import Decimal
from unittest import skipUnless
from django import forms
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured,
)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import ValidationError
from django.db import connection, models
from django.db.models.query import EmptyQuerySet
from django.forms.models import (
ModelFormMetaclass, construct_instance, fields_for_model, model_to_dict,
modelform_factory,
)
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils._os import upath
from .models import (
Article, ArticleStatus, Author, Author1, Award, BetterWriter, BigInt, Book,
Category, Character, Colour, ColourfulItem, CommaSeparatedInteger,
CustomErrorMessage, CustomFF, CustomFieldForExclusionModel, DateTimePost,
DerivedBook, DerivedPost, Document, ExplicitPK, FilePathModel,
FlexibleDatePost, Homepage, ImprovedArticle, ImprovedArticleWithParentLink,
Inventory, Person, Photo, Post, Price, Product, Publication,
PublicationDefaults, StrictAssignmentAll, StrictAssignmentFieldSpecific,
Student, StumpJoke, TextFile, Triple, Writer, WriterProfile, test_images,
)
if test_images:
    # NOTE(review): test_images presumably flags whether image-field support
    # (i.e. an image library) is available -- defined in .models, confirm
    # there. The image models and their forms only exist when it is set.
    from .models import ImageFile, OptionalImageFile

    class ImageFileForm(forms.ModelForm):
        class Meta:
            model = ImageFile
            fields = '__all__'

    class OptionalImageFileForm(forms.ModelForm):
        class Meta:
            model = OptionalImageFile
            fields = '__all__'
# Plain ModelForms over the test models used throughout this module; most
# expose every model field via fields = '__all__'.
class ProductForm(forms.ModelForm):
    class Meta:
        model = Product
        fields = '__all__'


class PriceForm(forms.ModelForm):
    class Meta:
        model = Price
        fields = '__all__'


class BookForm(forms.ModelForm):
    class Meta:
        model = Book
        fields = '__all__'


class DerivedBookForm(forms.ModelForm):
    class Meta:
        model = DerivedBook
        fields = '__all__'


class ExplicitPKForm(forms.ModelForm):
    # Explicitly lists the (primary key) 'key' field instead of '__all__'.
    class Meta:
        model = ExplicitPK
        fields = ('key', 'desc',)


class PostForm(forms.ModelForm):
    class Meta:
        model = Post
        fields = '__all__'


class DerivedPostForm(forms.ModelForm):
    class Meta:
        model = DerivedPost
        fields = '__all__'


class CustomWriterForm(forms.ModelForm):
    # Overrides the model-derived 'name' field to make it optional.
    name = forms.CharField(required=False)

    class Meta:
        model = Writer
        fields = '__all__'
class BaseCategoryForm(forms.ModelForm):
    class Meta:
        model = Category
        fields = '__all__'


class ArticleForm(forms.ModelForm):
    class Meta:
        model = Article
        fields = '__all__'


class RoykoForm(forms.ModelForm):
    class Meta:
        model = Writer
        fields = '__all__'


class ArticleStatusForm(forms.ModelForm):
    class Meta:
        model = ArticleStatus
        fields = '__all__'


class InventoryForm(forms.ModelForm):
    class Meta:
        model = Inventory
        fields = '__all__'


class SelectInventoryForm(forms.Form):
    # A plain Form (not ModelForm) selecting Inventory rows by barcode.
    items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')


class CustomFieldForExclusionForm(forms.ModelForm):
    class Meta:
        model = CustomFieldForExclusionModel
        fields = ['name', 'markup']


class TextFileForm(forms.ModelForm):
    class Meta:
        model = TextFile
        fields = '__all__'


class BigIntForm(forms.ModelForm):
    class Meta:
        model = BigInt
        fields = '__all__'


class ModelFormWithMedia(forms.ModelForm):
    # Declares form media assets alongside the model-derived fields.
    class Media:
        js = ('/some/form/javascript',)
        css = {
            'all': ('/some/form/css',)
        }

    class Meta:
        model = TextFile
        fields = '__all__'


class CustomErrorMessageForm(forms.ModelForm):
    # Field-level error message override, used to test message precedence.
    name1 = forms.CharField(error_messages={'invalid': 'Form custom error message.'})

    class Meta:
        fields = '__all__'
        model = CustomErrorMessage
class ModelFormBaseTest(TestCase):
def test_base_form(self):
self.assertEqual(list(BaseCategoryForm.base_fields),
['name', 'slug', 'url'])
def test_no_model_class(self):
class NoModelModelForm(forms.ModelForm):
pass
with self.assertRaises(ValueError):
NoModelModelForm()
def test_empty_fields_to_fields_for_model(self):
"""
An argument of fields=() to fields_for_model should return an empty dictionary
"""
field_dict = fields_for_model(Person, fields=())
self.assertEqual(len(field_dict), 0)
def test_empty_fields_on_modelform(self):
"""
No fields on a ModelForm should actually result in no fields.
"""
class EmptyPersonForm(forms.ModelForm):
class Meta:
model = Person
fields = ()
form = EmptyPersonForm()
self.assertEqual(len(form.fields), 0)
def test_empty_fields_to_construct_instance(self):
"""
No fields should be set on a model instance if construct_instance receives fields=().
"""
form = modelform_factory(Person, fields="__all__")({'name': 'John Doe'})
self.assertTrue(form.is_valid())
instance = construct_instance(form, Person(), fields=())
self.assertEqual(instance.name, '')
def test_blank_with_null_foreign_key_field(self):
"""
#13776 -- ModelForm's with models having a FK set to null=False and
required=False should be valid.
"""
class FormForTestingIsValid(forms.ModelForm):
class Meta:
model = Student
fields = '__all__'
def __init__(self, *args, **kwargs):
super(FormForTestingIsValid, self).__init__(*args, **kwargs)
self.fields['character'].required = False
char = Character.objects.create(username='user',
last_action=datetime.datetime.today())
data = {'study': 'Engineering'}
data2 = {'study': 'Engineering', 'character': char.pk}
# form is valid because required=False for field 'character'
f1 = FormForTestingIsValid(data)
self.assertTrue(f1.is_valid())
f2 = FormForTestingIsValid(data2)
self.assertTrue(f2.is_valid())
obj = f2.save()
self.assertEqual(obj.character, char)
def test_blank_false_with_null_true_foreign_key_field(self):
"""
A ModelForm with a model having ForeignKey(blank=False, null=True)
and the form field set to required=False should allow the field to be
unset.
"""
class AwardForm(forms.ModelForm):
class Meta:
model = Award
fields = '__all__'
def __init__(self, *args, **kwargs):
super(AwardForm, self).__init__(*args, **kwargs)
self.fields['character'].required = False
character = Character.objects.create(username='user', last_action=datetime.datetime.today())
award = Award.objects.create(name='Best sprinter', character=character)
data = {'name': 'Best tester', 'character': ''} # remove character
form = AwardForm(data=data, instance=award)
self.assertTrue(form.is_valid())
award = form.save()
self.assertIsNone(award.character)
def test_save_blank_false_with_required_false(self):
"""
A ModelForm with a model with a field set to blank=False and the form
field set to required=False should allow the field to be unset.
"""
obj = Writer.objects.create(name='test')
form = CustomWriterForm(data={'name': ''}, instance=obj)
self.assertTrue(form.is_valid())
obj = form.save()
self.assertEqual(obj.name, '')
def test_missing_fields_attribute(self):
message = (
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form "
"MissingFieldsForm needs updating."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
class MissingFieldsForm(forms.ModelForm):
class Meta:
model = Category
def test_extra_fields(self):
class ExtraFields(BaseCategoryForm):
some_extra_field = forms.BooleanField()
self.assertEqual(list(ExtraFields.base_fields),
['name', 'slug', 'url', 'some_extra_field'])
def test_extra_field_model_form(self):
    """Naming a nonexistent model field in Meta.fields raises FieldError,
    and the error message names the offending field."""
    try:
        class ExtraPersonForm(forms.ModelForm):
            """ ModelForm with an extra field """
            age = forms.IntegerField()
            class Meta:
                model = Person
                fields = ('name', 'no-field')
    except FieldError as e:
        # Make sure the exception contains some reference to the
        # field responsible for the problem.
        self.assertIn('no-field', e.args[0])
    else:
        self.fail('Invalid "no-field" field not caught')
def test_extra_declared_field_model_form(self):
    """A declaratively-added field listed in Meta.fields must not be
    mistaken for an unknown model field."""
    try:
        class ExtraPersonForm(forms.ModelForm):
            """ ModelForm with an extra field """
            age = forms.IntegerField()
            class Meta:
                model = Person
                fields = ('name', 'age')
    except FieldError:
        self.fail('Declarative field raised FieldError incorrectly')
def test_extra_field_modelform_factory(self):
    """modelform_factory() also rejects unknown names passed via `fields`."""
    with self.assertRaises(FieldError):
        modelform_factory(Person, fields=['no-field', 'name'])
def test_replace_field(self):
    """A declared form field replaces the model-derived field of the
    same name."""
    class ReplaceField(forms.ModelForm):
        url = forms.BooleanField()
        class Meta:
            model = Category
            fields = '__all__'
    self.assertIsInstance(ReplaceField.base_fields['url'],
                          forms.fields.BooleanField)
def test_replace_field_variant_2(self):
    # Should have the same result as before,
    # but 'fields' attribute specified differently
    class ReplaceField(forms.ModelForm):
        url = forms.BooleanField()
        class Meta:
            model = Category
            fields = ['url']  # explicit list instead of '__all__'
    self.assertIsInstance(ReplaceField.base_fields['url'],
                          forms.fields.BooleanField)
def test_replace_field_variant_3(self):
    # Should have the same result as before,
    # but 'fields' attribute specified differently
    class ReplaceField(forms.ModelForm):
        url = forms.BooleanField()
        class Meta:
            model = Category
            fields = []  # url will still appear, since it is explicit above
    self.assertIsInstance(ReplaceField.base_fields['url'],
                          forms.fields.BooleanField)
def test_override_field(self):
    """An overriding field with required=False lets the form validate
    without a value for that field."""
    class WriterForm(forms.ModelForm):
        book = forms.CharField(required=False)
        class Meta:
            model = Writer
            fields = '__all__'
    wf = WriterForm({'name': 'Richard Lockridge'})
    self.assertTrue(wf.is_valid())
def test_limit_nonexistent_field(self):
    """Meta.fields naming an unknown field raises a descriptive FieldError."""
    expected_msg = 'Unknown field(s) (nonexistent) specified for Category'
    with self.assertRaisesMessage(FieldError, expected_msg):
        class InvalidCategoryForm(forms.ModelForm):
            class Meta:
                model = Category
                fields = ['nonexistent']
def test_limit_fields_with_string(self):
    """Meta.fields must be a sequence, not a bare string."""
    expected_msg = "CategoryForm.Meta.fields cannot be a string. Did you mean to type: ('url',)?"
    with self.assertRaisesMessage(TypeError, expected_msg):
        class CategoryForm(forms.ModelForm):
            class Meta:
                model = Category
                fields = ('url')  # note the missing comma
def test_exclude_fields(self):
    """Meta.exclude removes the named fields from the generated form."""
    class ExcludeFields(forms.ModelForm):
        class Meta:
            model = Category
            exclude = ['url']
    self.assertEqual(list(ExcludeFields.base_fields),
                     ['name', 'slug'])
def test_exclude_nonexistent_field(self):
    """Excluding an unknown field is silently ignored (unlike `fields`)."""
    class ExcludeFields(forms.ModelForm):
        class Meta:
            model = Category
            exclude = ['nonexistent']
    self.assertEqual(list(ExcludeFields.base_fields),
                     ['name', 'slug', 'url'])
def test_exclude_fields_with_string(self):
    """Meta.exclude must be a sequence, not a bare string."""
    expected_msg = "CategoryForm.Meta.exclude cannot be a string. Did you mean to type: ('url',)?"
    with self.assertRaisesMessage(TypeError, expected_msg):
        class CategoryForm(forms.ModelForm):
            class Meta:
                model = Category
                exclude = ('url')  # note the missing comma
def test_exclude_and_validation(self):
    """Fields left off a ModelForm are not validated by the form, even
    though the resulting model instance may be incomplete."""
    # This Price instance generated by this form is not valid because the quantity
    # field is required, but the form is valid because the field is excluded from
    # the form. This is for backwards compatibility.
    class PriceFormWithoutQuantity(forms.ModelForm):
        class Meta:
            model = Price
            exclude = ('quantity',)
    form = PriceFormWithoutQuantity({'price': '6.00'})
    self.assertTrue(form.is_valid())
    price = form.save(commit=False)
    # Model-level validation still catches the missing quantity.
    with self.assertRaises(ValidationError):
        price.full_clean()
    # The form should not validate fields that it doesn't contain even if they are
    # specified using 'fields', not 'exclude'.
    class PriceFormWithoutQuantity(forms.ModelForm):
        class Meta:
            model = Price
            fields = ('price',)
    form = PriceFormWithoutQuantity({'price': '6.00'})
    self.assertTrue(form.is_valid())
    # The form should still have an instance of a model that is not complete and
    # not saved into a DB yet.
    self.assertEqual(form.instance.price, Decimal('6.00'))
    self.assertIsNone(form.instance.quantity)
    self.assertIsNone(form.instance.pk)
def test_confused_form(self):
    """When both 'fields' and 'exclude' are given, 'exclude' wins."""
    class ConfusedForm(forms.ModelForm):
        """ Using 'fields' *and* 'exclude'. Not sure why you'd want to do
        this, but uh, "be liberal in what you accept" and all.
        """
        class Meta:
            model = Category
            fields = ['name', 'url']
            exclude = ['url']
    self.assertEqual(list(ConfusedForm.base_fields),
                     ['name'])
def test_mixmodel_form(self):
    """A subclass Meta completely replaces the parent form's Meta."""
    class MixModelForm(BaseCategoryForm):
        """ Don't allow more than one 'model' definition in the
        inheritance hierarchy. Technically, it would generate a valid
        form, but the fact that the resulting save method won't deal with
        multiple objects is likely to trip up people not familiar with the
        mechanics.
        """
        class Meta:
            model = Article
            fields = '__all__'
    # MixModelForm is now an Article-related thing, because MixModelForm.Meta
    # overrides BaseCategoryForm.Meta.
    self.assertEqual(
        list(MixModelForm.base_fields),
        ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
    )
def test_article_form(self):
    """Sanity check of ArticleForm's generated field list and ordering."""
    self.assertEqual(
        list(ArticleForm.base_fields),
        ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
    )
def test_bad_form(self):
    """With multiple ModelForm bases, the first Meta in the MRO wins."""
    # First class with a Meta class wins...
    class BadForm(ArticleForm, BaseCategoryForm):
        pass
    self.assertEqual(
        list(BadForm.base_fields),
        ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status']
    )
def test_invalid_meta_model(self):
    """Instantiating a ModelForm whose Meta has no model raises ValueError."""
    class InvalidModelForm(forms.ModelForm):
        class Meta:
            pass  # no model
    # Can't create new form
    with self.assertRaises(ValueError):
        InvalidModelForm()
    # Even if you provide a model instance
    with self.assertRaises(ValueError):
        InvalidModelForm(instance=Category)
def test_subcategory_form(self):
    """A subclass without its own Meta inherits the parent's Meta."""
    class SubCategoryForm(BaseCategoryForm):
        """ Subclassing without specifying a Meta on the class will use
        the parent's Meta (or the first parent in the MRO if there are
        multiple parent classes).
        """
        pass
    self.assertEqual(list(SubCategoryForm.base_fields),
                     ['name', 'slug', 'url'])
def test_subclassmeta_form(self):
    """Subclassing the inner Meta class lets a child form tweak the
    parent's field selection (here: additionally exclude 'url')."""
    class SomeCategoryForm(forms.ModelForm):
        checkbox = forms.BooleanField()
        class Meta:
            model = Category
            fields = '__all__'
    class SubclassMeta(SomeCategoryForm):
        """ We can also subclass the Meta inner class to change the fields
        list.
        """
        class Meta(SomeCategoryForm.Meta):
            exclude = ['url']
    self.assertHTMLEqual(
        str(SubclassMeta()),
        """<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_checkbox">Checkbox:</label></th>
<td><input type="checkbox" name="checkbox" id="id_checkbox" /></td></tr>"""
    )
def test_orderfields_form(self):
    """Meta.fields controls the ordering of the generated form fields."""
    class OrderFields(forms.ModelForm):
        class Meta:
            model = Category
            fields = ['url', 'name']
    self.assertEqual(list(OrderFields.base_fields),
                     ['url', 'name'])
    self.assertHTMLEqual(
        str(OrderFields()),
        """<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>
<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>"""
    )
def test_orderfields2_form(self):
    """Ordering from Meta.fields is preserved after 'exclude' is applied."""
    class OrderFields2(forms.ModelForm):
        class Meta:
            model = Category
            fields = ['slug', 'url', 'name']
            exclude = ['url']
    self.assertEqual(list(OrderFields2.base_fields),
                     ['slug', 'name'])
class FieldOverridesByFormMetaForm(forms.ModelForm):
    """ModelForm exercising every per-field override hook on Meta:
    widgets, labels, help_texts, error_messages and field_classes."""
    class Meta:
        model = Category
        fields = ['name', 'url', 'slug']
        # Per-field widget overrides (class or instance both allowed).
        widgets = {
            'name': forms.Textarea,
            'url': forms.TextInput(attrs={'class': 'url'})
        }
        labels = {
            'name': 'Title',
        }
        help_texts = {
            'slug': 'Watch out! Letters, numbers, underscores and hyphens only.',
        }
        error_messages = {
            'slug': {
                'invalid': (
                    "Didn't you read the help text? "
                    "We said letters, numbers, underscores and hyphens only!"
                )
            }
        }
        # Replace the form-field class used for 'url'.
        field_classes = {
            'url': forms.URLField,
        }
class TestFieldOverridesByFormMeta(SimpleTestCase):
    """Checks that each Meta override on FieldOverridesByFormMetaForm
    actually takes effect on the rendered form."""
    def test_widget_overrides(self):
        """Meta.widgets swaps the default widget per field."""
        form = FieldOverridesByFormMetaForm()
        self.assertHTMLEqual(
            str(form['name']),
            '<textarea id="id_name" rows="10" cols="40" name="name" maxlength="20"></textarea>',
        )
        self.assertHTMLEqual(
            str(form['url']),
            '<input id="id_url" type="text" class="url" name="url" maxlength="40" />',
        )
        self.assertHTMLEqual(
            str(form['slug']),
            '<input id="id_slug" type="text" name="slug" maxlength="20" />',
        )
    def test_label_overrides(self):
        """Meta.labels replaces the model-derived label; others keep theirs."""
        form = FieldOverridesByFormMetaForm()
        self.assertHTMLEqual(
            str(form['name'].label_tag()),
            '<label for="id_name">Title:</label>',
        )
        self.assertHTMLEqual(
            str(form['url'].label_tag()),
            '<label for="id_url">The URL:</label>',
        )
        self.assertHTMLEqual(
            str(form['slug'].label_tag()),
            '<label for="id_slug">Slug:</label>',
        )
    def test_help_text_overrides(self):
        """Meta.help_texts replaces the field's help text."""
        form = FieldOverridesByFormMetaForm()
        self.assertEqual(
            form['slug'].help_text,
            'Watch out! Letters, numbers, underscores and hyphens only.',
        )
    def test_error_messages_overrides(self):
        """Meta.error_messages customizes the 'invalid' message for slug."""
        form = FieldOverridesByFormMetaForm(data={
            'name': 'Category',
            'url': 'http://www.example.com/category/',
            'slug': '!%#*@',
        })
        form.full_clean()
        error = [
            "Didn't you read the help text? "
            "We said letters, numbers, underscores and hyphens only!",
        ]
        self.assertEqual(form.errors, {'slug': error})
    def test_field_type_overrides(self):
        """Meta.field_classes changes the form field class without touching
        the underlying model field."""
        form = FieldOverridesByFormMetaForm()
        self.assertIs(Category._meta.get_field('url').__class__, models.CharField)
        self.assertIsInstance(form.fields['url'], forms.URLField)
class IncompleteCategoryFormWithFields(forms.ModelForm):
    """
    A form that replaces the model's url field with a custom one. This should
    prevent the model field's validation from being called.
    """
    # Not required, unlike the model field it shadows.
    url = forms.CharField(required=False)
    class Meta:
        fields = ('name', 'slug')
        model = Category
class IncompleteCategoryFormWithExclude(forms.ModelForm):
    """
    A form that replaces the model's url field with a custom one. This should
    prevent the model field's validation from being called.
    """
    # Not required, unlike the model field it shadows.
    url = forms.CharField(required=False)
    class Meta:
        exclude = ['url']
        model = Category
class ValidationTest(SimpleTestCase):
    """Validation behavior when model fields are replaced on, or excluded
    from, the ModelForm.

    Uses self.assertTrue instead of bare ``assert``: assert statements are
    stripped under ``python -O`` and produce no diagnostic output on failure.
    """
    def test_validates_with_replaced_field_not_specified(self):
        """A non-required override of a required model field validates empty."""
        form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertTrue(form.is_valid())
    def test_validates_with_replaced_field_excluded(self):
        """Same as above, with the model field excluded via Meta.exclude."""
        form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'})
        self.assertTrue(form.is_valid())
    def test_notrequired_overrides_notblank(self):
        """required=False on the form overrides blank=False on the model."""
        form = CustomWriterForm({})
        self.assertTrue(form.is_valid())
class UniqueTest(TestCase):
    """
    unique/unique_together validation.
    """
    def setUp(self):
        # Shared author used by the Book-related uniqueness tests.
        self.writer = Writer.objects.create(name='Mike Royko')
    def test_simple_unique(self):
        """A unique field rejects duplicates, except against its own instance."""
        form = ProductForm({'slug': 'teddy-bear-blue'})
        self.assertTrue(form.is_valid())
        obj = form.save()
        form = ProductForm({'slug': 'teddy-bear-blue'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.'])
        # Editing the existing instance with the same slug is fine.
        form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj)
        self.assertTrue(form.is_valid())
    def test_unique_together(self):
        """ModelForm test of unique_together constraint"""
        form = PriceForm({'price': '6.00', 'quantity': '1'})
        self.assertTrue(form.is_valid())
        form.save()
        form = PriceForm({'price': '6.00', 'quantity': '1'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        # unique_together violations are reported as non-field errors.
        self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])
    def test_multiple_field_unique_together(self):
        """
        When the same field is involved in multiple unique_together
        constraints, we need to make sure we don't remove the data for it
        before doing all the validation checking (not just failing after
        the first one).
        """
        class TripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                fields = '__all__'
        Triple.objects.create(left=1, middle=2, right=3)
        form = TripleForm({'left': '1', 'middle': '2', 'right': '3'})
        self.assertFalse(form.is_valid())
        form = TripleForm({'left': '1', 'middle': '3', 'right': '1'})
        self.assertTrue(form.is_valid())
    @skipUnlessDBFeature('supports_nullable_unique_constraints')
    def test_unique_null(self):
        """NULL values do not trigger unique_together violations."""
        title = 'I May Be Wrong But I Doubt It'
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertTrue(form.is_valid())
        form.save()
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
        # With author omitted (NULL), duplicates are allowed.
        form = BookForm({'title': title})
        self.assertTrue(form.is_valid())
        form.save()
        form = BookForm({'title': title})
        self.assertTrue(form.is_valid())
    def test_inherited_unique(self):
        """A unique field on a concrete parent is enforced on the child form."""
        title = 'Boss'
        Book.objects.create(title=title, author=self.writer, special_id=1)
        form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.'])
    def test_inherited_unique_together(self):
        """unique_together on a concrete parent is enforced on the child form."""
        title = 'Boss'
        form = BookForm({'title': title, 'author': self.writer.pk})
        self.assertTrue(form.is_valid())
        form.save()
        form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.'])
    def test_abstract_inherited_unique(self):
        """A unique field inherited from an abstract base is enforced."""
        title = 'Boss'
        isbn = '12345'
        DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
        form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'isbn': isbn})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.'])
    def test_abstract_inherited_unique_together(self):
        """unique_together inherited from an abstract base is enforced."""
        title = 'Boss'
        isbn = '12345'
        DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn)
        form = DerivedBookForm({
            'title': 'Other',
            'author': self.writer.pk,
            'isbn': '9876',
            'suffix1': '0',
            'suffix2': '0'
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['__all__'],
                         ['Derived book with this Suffix1 and Suffix2 already exists.'])
    def test_explicitpk_unspecified(self):
        """Test for primary_key being in the form and failing validation."""
        form = ExplicitPKForm({'key': '', 'desc': ''})
        self.assertFalse(form.is_valid())
    def test_explicitpk_unique(self):
        """Ensure keys and blank character strings are tested for uniqueness."""
        form = ExplicitPKForm({'key': 'key1', 'desc': ''})
        self.assertTrue(form.is_valid())
        form.save()
        form = ExplicitPKForm({'key': 'key1', 'desc': ''})
        self.assertFalse(form.is_valid())
        # Three errors: the unique_together pair plus each unique field.
        self.assertEqual(len(form.errors), 3)
        self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.'])
        self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.'])
        self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.'])
    def test_unique_for_date(self):
        """unique_for_date/month/year constraints on Post are enforced."""
        p = Post.objects.create(title="Django 1.0 is released",
                                slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
        form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
        self.assertTrue(form.is_valid())
        form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
        form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
        # Editing the conflicting instance itself passes.
        form = PostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
                         "slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
        self.assertTrue(form.is_valid())
        form = PostForm({'title': "Django 1.0 is released"})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['posted'], ['This field is required.'])
    def test_unique_for_date_in_exclude(self):
        """
        If the date for unique_for_* constraints is excluded from the
        ModelForm (in this case 'posted' has editable=False), then the
        constraint should be ignored.
        """
        class DateTimePostForm(forms.ModelForm):
            class Meta:
                model = DateTimePost
                fields = '__all__'
        DateTimePost.objects.create(title="Django 1.0 is released",
                                    slug="Django 1.0", subtitle="Finally",
                                    posted=datetime.datetime(2008, 9, 3, 10, 10, 1))
        # 'title' has unique_for_date='posted'
        form = DateTimePostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        # 'slug' has unique_for_year='posted'
        form = DateTimePostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertTrue(form.is_valid())
        # 'subtitle' has unique_for_month='posted'
        form = DateTimePostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertTrue(form.is_valid())
    def test_inherited_unique_for_date(self):
        """unique_for_* constraints are enforced on an inherited form."""
        p = Post.objects.create(title="Django 1.0 is released",
                                slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.'])
        form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'})
        self.assertTrue(form.is_valid())
        form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'})
        self.assertTrue(form.is_valid())
        form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'})
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.'])
        form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.'])
        form = DerivedPostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
                                "slug": "Django 1.0", 'posted': '2008-09-03'}, instance=p)
        self.assertTrue(form.is_valid())
    def test_unique_for_date_with_nullable_date(self):
        """unique_for_* is skipped when the date field's value is NULL."""
        class FlexDatePostForm(forms.ModelForm):
            class Meta:
                model = FlexibleDatePost
                fields = '__all__'
        p = FlexibleDatePost.objects.create(title="Django 1.0 is released",
                                            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        form = FlexDatePostForm({'title': "Django 1.0 is released"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'slug': "Django 1.0"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'subtitle': "Finally"})
        self.assertTrue(form.is_valid())
        form = FlexDatePostForm({'subtitle': "Finally", "title": "Django 1.0 is released",
                                 "slug": "Django 1.0"}, instance=p)
        self.assertTrue(form.is_valid())
    def test_override_unique_message(self):
        """Meta.error_messages can customize the 'unique' message with
        %(model_name)s / %(field_label)s placeholders."""
        class CustomProductForm(ProductForm):
            class Meta(ProductForm.Meta):
                error_messages = {
                    'slug': {
                        'unique': "%(model_name)s's %(field_label)s not unique.",
                    }
                }
        Product.objects.create(slug='teddy-bear-blue')
        form = CustomProductForm({'slug': 'teddy-bear-blue'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['slug'], ["Product's Slug not unique."])
    def test_override_unique_together_message(self):
        """The 'unique_together' message is customized via NON_FIELD_ERRORS."""
        class CustomPriceForm(PriceForm):
            class Meta(PriceForm.Meta):
                error_messages = {
                    NON_FIELD_ERRORS: {
                        'unique_together': "%(model_name)s's %(field_labels)s not unique.",
                    }
                }
        Price.objects.create(price=6.00, quantity=1)
        form = CustomPriceForm({'price': '6.00', 'quantity': '1'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors[NON_FIELD_ERRORS], ["Price's Price and Quantity not unique."])
    def test_override_unique_for_date_message(self):
        """The 'unique_for_date' message supports %(date_field_label)s."""
        class CustomPostForm(PostForm):
            class Meta(PostForm.Meta):
                error_messages = {
                    'title': {
                        'unique_for_date': (
                            "%(model_name)s's %(field_label)s not unique "
                            "for %(date_field_label)s date."
                        ),
                    }
                }
        Post.objects.create(title="Django 1.0 is released",
                            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
        form = CustomPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'})
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['title'], ["Post's Title not unique for Posted date."])
class ModelToDictTests(TestCase):
    """
    Tests for forms.models.model_to_dict
    """
    def test_model_to_dict_many_to_many(self):
        """model_to_dict resolves a many-to-many field in a single query and
        returns the related primary keys as a list."""
        categories = [
            Category(name='TestName1', slug='TestName1', url='url1'),
            Category(name='TestName2', slug='TestName2', url='url2'),
            Category(name='TestName3', slug='TestName3', url='url3')
        ]
        for c in categories:
            c.save()
        writer = Writer(name='Test writer')
        writer.save()
        art = Article(
            headline='Test article',
            slug='test-article',
            pub_date=datetime.date(1988, 1, 4),
            writer=writer,
            article='Hello.'
        )
        art.save()
        for c in categories:
            art.categories.add(c)
        art.save()
        # One query: the m2m lookup for 'categories'.
        with self.assertNumQueries(1):
            d = model_to_dict(art)
        # Ensure all many-to-many categories appear in model_to_dict
        for c in categories:
            self.assertIn(c.pk, d['categories'])
        # Ensure many-to-many relation appears as a list
        self.assertIsInstance(d['categories'], list)
    def test_reuse_prefetched(self):
        # model_to_dict should not hit the database if it can reuse
        # the data populated by prefetch_related.
        categories = [
            Category(name='TestName1', slug='TestName1', url='url1'),
            Category(name='TestName2', slug='TestName2', url='url2'),
            Category(name='TestName3', slug='TestName3', url='url3')
        ]
        for c in categories:
            c.save()
        writer = Writer(name='Test writer')
        writer.save()
        art = Article(
            headline='Test article',
            slug='test-article',
            pub_date=datetime.date(1988, 1, 4),
            writer=writer,
            article='Hello.'
        )
        art.save()
        for c in categories:
            art.categories.add(c)
        art = Article.objects.prefetch_related('categories').get(pk=art.pk)
        # Zero queries: the prefetch cache must be reused.
        with self.assertNumQueries(0):
            d = model_to_dict(art)
        # Ensure all many-to-many categories appear in model_to_dict
        for c in categories:
            self.assertIn(c.pk, d['categories'])
        # Ensure many-to-many relation appears as a list
        self.assertIsInstance(d['categories'], list)
class ModelFormBasicTests(TestCase):
def create_basic_data(self):
    """Create the shared fixtures (three categories, two writers) used by
    several tests in this class; stored as attributes on self."""
    self.c1 = Category.objects.create(
        name="Entertainment", slug="entertainment", url="entertainment")
    self.c2 = Category.objects.create(
        name="It's a test", slug="its-test", url="test")
    self.c3 = Category.objects.create(
        name="Third test", slug="third-test", url="third")
    self.w_royko = Writer.objects.create(name='Mike Royko')
    self.w_woodward = Writer.objects.create(name='Bob Woodward')
def test_base_form(self):
    """Default table, <ul>, and single-field rendering of a ModelForm."""
    self.assertEqual(Category.objects.count(), 0)
    f = BaseCategoryForm()
    self.assertHTMLEqual(
        str(f),
        """<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="20" /></td></tr>
<tr><th><label for="id_slug">Slug:</label></th>
<td><input id="id_slug" type="text" name="slug" maxlength="20" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>"""
    )
    self.assertHTMLEqual(
        str(f.as_ul()),
        """<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" /></li>
<li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" /></li>
<li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" /></li>"""
    )
    self.assertHTMLEqual(
        str(f["name"]),
        """<input id="id_name" type="text" name="name" maxlength="20" />""")
def test_auto_id(self):
    """auto_id=False suppresses id attributes and <label for=...> tags."""
    f = BaseCategoryForm(auto_id=False)
    self.assertHTMLEqual(
        str(f.as_ul()),
        """<li>Name: <input type="text" name="name" maxlength="20" /></li>
<li>Slug: <input type="text" name="slug" maxlength="20" /></li>
<li>The URL: <input type="text" name="url" maxlength="40" /></li>"""
    )
def test_initial_values(self):
    """Initial data can come from the `initial` dict, from a model
    instance, or from bound data layered over an instance."""
    self.create_basic_data()
    # Initial values can be provided for model forms
    f = ArticleForm(
        auto_id=False,
        initial={
            'headline': 'Your headline here',
            'categories': [str(self.c1.id), str(self.c2.id)]
        })
    self.assertHTMLEqual(
        f.as_ul(),
        '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s" selected="selected">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
    # When the ModelForm is passed an instance, that instance's current values are
    # inserted as 'initial' data in each Field.
    f = RoykoForm(auto_id=False, instance=self.w_royko)
    self.assertHTMLEqual(
        six.text_type(f),
        '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" /><br />
<span class="helptext">Use both first and last names.</span></td></tr>'''
    )
    art = Article.objects.create(
        headline='Test article',
        slug='test-article',
        pub_date=datetime.date(1988, 1, 4),
        writer=self.w_royko,
        article='Hello.'
    )
    art_id_1 = art.id
    f = ArticleForm(auto_id=False, instance=art)
    self.assertHTMLEqual(
        f.as_ul(),
        '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
    # Bound data over an instance updates that same row on save().
    f = ArticleForm({
        'headline': 'Test headline',
        'slug': 'test-headline',
        'pub_date': '1984-02-06',
        'writer': six.text_type(self.w_royko.pk),
        'article': 'Hello.'
    }, instance=art)
    self.assertEqual(f.errors, {})
    self.assertTrue(f.is_valid())
    test_art = f.save()
    self.assertEqual(test_art.id, art_id_1)
    test_art = Article.objects.get(id=art_id_1)
    self.assertEqual(test_art.headline, 'Test headline')
def test_m2m_initial_callable(self):
    """
    Regression for #10349: A callable can be provided as the initial value for an m2m field
    """
    self.maxDiff = 1200
    self.create_basic_data()
    # Set up a callable initial value
    def formfield_for_dbfield(db_field, **kwargs):
        if db_field.name == 'categories':
            kwargs['initial'] = lambda: Category.objects.all().order_by('name')[:2]
        return db_field.formfield(**kwargs)
    # Create a ModelForm, instantiate it, and check that the output is as expected
    ModelForm = modelform_factory(Article, fields=['headline', 'categories'],
                                  formfield_callback=formfield_for_dbfield)
    form = ModelForm()
    # NOTE: the apostrophe in "It's a test" is rendered as the numeric
    # character reference &#39; — the expected HTML must use a *valid*
    # reference (it previously read `&39;`, which is not an entity and
    # could never match the rendered output under assertHTMLEqual).
    self.assertHTMLEqual(
        form.as_ul(),
        """<li><label for="id_headline">Headline:</label>
<input id="id_headline" type="text" name="headline" maxlength="50" /></li>
<li><label for="id_categories">Categories:</label>
<select multiple="multiple" name="categories" id="id_categories">
<option value="%d" selected="selected">Entertainment</option>
<option value="%d" selected="selected">It&#39;s a test</option>
<option value="%d">Third test</option>
</select></li>"""
        % (self.c1.pk, self.c2.pk, self.c3.pk))
def test_basic_creation(self):
    """save() on a valid unbound-instance form creates exactly one row."""
    self.assertEqual(Category.objects.count(), 0)
    f = BaseCategoryForm({'name': 'Entertainment',
                          'slug': 'entertainment',
                          'url': 'entertainment'})
    self.assertTrue(f.is_valid())
    self.assertEqual(f.cleaned_data['name'], 'Entertainment')
    self.assertEqual(f.cleaned_data['slug'], 'entertainment')
    self.assertEqual(f.cleaned_data['url'], 'entertainment')
    c1 = f.save()
    # Testing whether the same object is returned from the
    # ORM... not the fastest way...
    self.assertEqual(Category.objects.count(), 1)
    self.assertEqual(c1, Category.objects.all()[0])
    self.assertEqual(c1.name, "Entertainment")
def test_save_commit_false(self):
    """save(commit=False) builds the instance without writing to the DB."""
    # If you call save() with commit=False, then it will return an object that
    # hasn't yet been saved to the database. In this case, it's up to you to call
    # save() on the resulting model instance.
    f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'})
    self.assertTrue(f.is_valid())
    c1 = f.save(commit=False)
    self.assertEqual(c1.name, "Third test")
    self.assertEqual(Category.objects.count(), 0)
    c1.save()
    self.assertEqual(Category.objects.count(), 1)
def test_save_with_data_errors(self):
    """save() on an invalid form raises ValueError; cleaned_data holds
    only the fields that validated."""
    # If you call save() with invalid data, you'll get a ValueError.
    f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'})
    self.assertEqual(f.errors['name'], ['This field is required.'])
    self.assertEqual(
        f.errors['slug'],
        ["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."]
    )
    self.assertEqual(f.cleaned_data, {'url': 'foo'})
    with self.assertRaises(ValueError):
        f.save()
    f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'})
    with self.assertRaises(ValueError):
        f.save()
def test_multi_fields(self):
    """Rendering of relation fields: FK/choices as <select>, M2M as a
    multiple <select>, with selections reflecting the bound instance."""
    self.create_basic_data()
    self.maxDiff = None
    # ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any
    # fields with the 'choices' attribute are represented by a ChoiceField.
    f = ArticleForm(auto_id=False)
    self.assertHTMLEqual(
        six.text_type(f),
        '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>
<tr><th>Writer:</th><td><select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></td></tr>
<tr><th>Article:</th><td><textarea rows="10" cols="40" name="article"></textarea></td></tr>
<tr><th>Categories:</th><td><select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></td></tr>
<tr><th>Status:</th><td><select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></td></tr>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
    # Add some categories and test the many-to-many form output.
    new_art = Article.objects.create(
        article="Hello.", headline="New headline", slug="new-headline",
        pub_date=datetime.date(1988, 1, 4), writer=self.w_royko)
    new_art.categories.add(Category.objects.get(name='Entertainment'))
    self.assertQuerysetEqual(new_art.categories.all(), ["Entertainment"])
    f = ArticleForm(auto_id=False, instance=new_art)
    self.assertHTMLEqual(
        f.as_ul(),
        '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>
<li>Writer: <select name="writer">
<option value="">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s" selected="selected">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article">Hello.</textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s" selected="selected">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
def test_subset_fields(self):
    """
    ``Meta.fields`` restricts a ModelForm to a subset of the model's
    fields: only the listed fields are rendered, validated, and saved.
    """
    # You can restrict a form to a subset of the complete list of fields
    # by providing a 'fields' argument. If you try to save a
    # model created with such a form, you need to ensure that the fields
    # that are _not_ on the form have default values, or are allowed to have
    # a value of None. If a field isn't specified on a form, the object created
    # from the form can't provide a value for that field!
    class PartialArticleForm(forms.ModelForm):
        class Meta:
            model = Article
            fields = ('headline', 'pub_date')
    f = PartialArticleForm(auto_id=False)
    # Only the two listed fields are rendered.
    self.assertHTMLEqual(
        six.text_type(f),
        '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" /></td></tr>
<tr><th>Pub date:</th><td><input type="text" name="pub_date" /></td></tr>''')
    # You can create a form over a subset of the available fields
    # by specifying a 'fields' argument to form_for_instance.
    class PartialArticleFormWithSlug(forms.ModelForm):
        class Meta:
            model = Article
            fields = ('headline', 'slug', 'pub_date')
    w_royko = Writer.objects.create(name='Mike Royko')
    art = Article.objects.create(
        article="Hello.", headline="New headline", slug="new-headline",
        pub_date=datetime.date(1988, 1, 4), writer=w_royko)
    f = PartialArticleFormWithSlug({
        'headline': 'New headline',
        'slug': 'new-headline',
        'pub_date': '1988-01-04'
    }, auto_id=False, instance=art)
    self.assertHTMLEqual(
        f.as_ul(),
        '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" value="1988-01-04" /></li>'''
    )
    self.assertTrue(f.is_valid())
    # Saving with instance= updates the existing row rather than creating one.
    new_art = f.save()
    self.assertEqual(new_art.id, art.id)
    new_art = Article.objects.get(id=art.id)
    self.assertEqual(new_art.headline, 'New headline')
def test_m2m_editing(self):
    """
    Saving a ModelForm writes ManyToMany data; an empty submitted list
    clears it, and with ``commit=False`` the m2m data is deferred until
    ``save_m2m()`` is called on the form.
    """
    self.create_basic_data()
    form_data = {
        'headline': 'New headline',
        'slug': 'new-headline',
        'pub_date': '1988-01-04',
        'writer': six.text_type(self.w_royko.pk),
        'article': 'Hello.',
        'categories': [six.text_type(self.c1.id), six.text_type(self.c2.id)]
    }
    # Create a new article, with categories, via the form.
    f = ArticleForm(form_data)
    new_art = f.save()
    new_art = Article.objects.get(id=new_art.id)
    art_id_1 = new_art.id
    self.assertQuerysetEqual(new_art.categories.order_by('name'),
        ["Entertainment", "It's a test"])
    # Now, submit form data with no categories. This deletes the existing categories.
    form_data['categories'] = []
    f = ArticleForm(form_data, instance=new_art)
    new_art = f.save()
    self.assertEqual(new_art.id, art_id_1)
    new_art = Article.objects.get(id=art_id_1)
    self.assertQuerysetEqual(new_art.categories.all(), [])
    # Create a new article, with no categories, via the form.
    f = ArticleForm(form_data)
    new_art = f.save()
    art_id_2 = new_art.id
    self.assertNotIn(art_id_2, (None, art_id_1))
    new_art = Article.objects.get(id=art_id_2)
    self.assertQuerysetEqual(new_art.categories.all(), [])
    # Create a new article, with categories, via the form, but use commit=False.
    # The m2m data won't be saved until save_m2m() is invoked on the form.
    form_data['categories'] = [six.text_type(self.c1.id), six.text_type(self.c2.id)]
    f = ArticleForm(form_data)
    new_art = f.save(commit=False)
    # Manually save the instance
    new_art.save()
    art_id_3 = new_art.id
    self.assertNotIn(art_id_3, (None, art_id_1, art_id_2))
    # The instance doesn't have m2m data yet
    new_art = Article.objects.get(id=art_id_3)
    self.assertQuerysetEqual(new_art.categories.all(), [])
    # Save the m2m data on the form
    f.save_m2m()
    self.assertQuerysetEqual(new_art.categories.order_by('name'),
        ["Entertainment", "It's a test"])
def test_custom_form_fields(self):
    """
    A custom ModelForm with explicitly declared fields can still save()
    its changes onto an existing model instance.
    """
    # This form happens to declare the same field names as the Category
    # model, with tighter max_length limits, so save() applies its
    # cleaned data to the bound Category instance.
    class ShortCategory(forms.ModelForm):
        name = forms.CharField(max_length=5)
        slug = forms.CharField(max_length=5)
        url = forms.CharField(max_length=3)

        class Meta:
            model = Category
            fields = '__all__'

    category = Category.objects.create(name='Third test')
    submitted = {'name': 'Third', 'slug': 'third', 'url': '3rd'}
    bound_form = ShortCategory(submitted, instance=category)
    saved = bound_form.save()
    self.assertEqual(saved.name, 'Third')
    # The change was persisted, not just applied in memory.
    self.assertEqual(Category.objects.get(id=category.id).name, 'Third')
def test_runtime_choicefield_populated(self):
    """
    Choices for ForeignKey/M2M fields are read from the database each
    time the form is rendered, not cached at form instantiation.
    """
    self.maxDiff = None
    # Here, we demonstrate that choices for a ForeignKey ChoiceField are determined
    # at runtime, based on the data in the database when the form is displayed, not
    # the data in the database when the form is instantiated.
    self.create_basic_data()
    f = ArticleForm(auto_id=False)
    self.assertHTMLEqual(
        f.as_ul(),
        '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
</select> </li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk))
    # New rows created *after* the form was instantiated still show up
    # when the same form object is rendered again.
    c4 = Category.objects.create(name='Fourth', url='4th')
    w_bernstein = Writer.objects.create(name='Carl Bernstein')
    self.assertHTMLEqual(
        f.as_ul(),
        '''<li>Headline: <input type="text" name="headline" maxlength="50" /></li>
<li>Slug: <input type="text" name="slug" maxlength="50" /></li>
<li>Pub date: <input type="text" name="pub_date" /></li>
<li>Writer: <select name="writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Carl Bernstein</option>
<option value="%s">Mike Royko</option>
</select></li>
<li>Article: <textarea rows="10" cols="40" name="article"></textarea></li>
<li>Categories: <select multiple="multiple" name="categories">
<option value="%s">Entertainment</option>
<option value="%s">It's a test</option>
<option value="%s">Third test</option>
<option value="%s">Fourth</option>
</select></li>
<li>Status: <select name="status">
<option value="" selected="selected">---------</option>
<option value="1">Draft</option>
<option value="2">Pending</option>
<option value="3">Live</option>
</select></li>''' % (self.w_woodward.pk, w_bernstein.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk, c4.pk))
class ModelChoiceFieldTests(TestCase):
    """Validation and choice-generation behavior of forms.ModelChoiceField."""

    def setUp(self):
        # Three categories shared by every test in this case.
        self.c1 = Category.objects.create(
            name="Entertainment", slug="entertainment", url="entertainment")
        self.c2 = Category.objects.create(
            name="It's a test", slug="its-test", url="test")
        self.c3 = Category.objects.create(
            name="Third", slug="third-test", url="third")

    # ModelChoiceField ############################################################
    def test_modelchoicefield(self):
        """clean() validates against the queryset's *current* database state."""
        f = forms.ModelChoiceField(Category.objects.all())
        self.assertEqual(list(f.choices), [
            ('', '---------'),
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test"),
            (self.c3.pk, 'Third')])
        # Empty/absent values are rejected on a required field.
        with self.assertRaises(ValidationError):
            f.clean('')
        with self.assertRaises(ValidationError):
            f.clean(None)
        with self.assertRaises(ValidationError):
            f.clean(0)
        # Invalid types that require TypeError to be caught (#22808).
        with self.assertRaises(ValidationError):
            f.clean([['fail']])
        with self.assertRaises(ValidationError):
            f.clean([{'foo': 'bar'}])
        self.assertEqual(f.clean(self.c2.id).name, "It's a test")
        self.assertEqual(f.clean(self.c3.id).name, 'Third')
        # Add a Category object *after* the ModelChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        c4 = Category.objects.create(name='Fourth', url='4th')
        self.assertEqual(f.clean(c4.id).name, 'Fourth')
        # Delete a Category object *after* the ModelChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        Category.objects.get(url='4th').delete()
        with self.assertRaises(ValidationError):
            f.clean(c4.id)

    def test_modelchoicefield_choices(self):
        """choices track queryset reassignment and honor label_from_instance."""
        f = forms.ModelChoiceField(Category.objects.filter(pk=self.c1.id), required=False)
        # required=False: the empty value cleans to None instead of raising.
        self.assertIsNone(f.clean(''))
        self.assertEqual(f.clean(str(self.c1.id)).name, "Entertainment")
        with self.assertRaises(ValidationError):
            f.clean('100')
        # len can be called on choices
        self.assertEqual(len(f.choices), 2)
        # queryset can be changed after the field is created.
        f.queryset = Category.objects.exclude(name='Third')
        self.assertEqual(list(f.choices), [
            ('', '---------'),
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test")])
        self.assertEqual(f.clean(self.c2.id).name, "It's a test")
        with self.assertRaises(ValidationError):
            f.clean(self.c3.id)
        # check that we can safely iterate choices repeatedly
        gen_one = list(f.choices)
        gen_two = f.choices
        self.assertEqual(gen_one[2], (self.c2.pk, "It's a test"))
        self.assertEqual(list(gen_two), [
            ('', '---------'),
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test")])
        # check that we can override the label_from_instance method to print custom labels (#4620)
        f.queryset = Category.objects.all()
        f.label_from_instance = lambda obj: "category " + str(obj)
        self.assertEqual(list(f.choices), [
            ('', '---------'),
            (self.c1.pk, 'category Entertainment'),
            (self.c2.pk, "category It's a test"),
            (self.c3.pk, 'category Third')])

    def test_modelchoicefield_11183(self):
        """
        Regression test for ticket #11183: each form instance gets its own
        deep-copied field, and the widget's choices point back at it.
        """
        class ModelChoiceForm(forms.Form):
            category = forms.ModelChoiceField(Category.objects.all())
        form1 = ModelChoiceForm()
        field1 = form1.fields['category']
        # To allow the widget to change the queryset of field1.widget.choices correctly,
        # without affecting other forms, the following must hold:
        self.assertIsNot(field1, ModelChoiceForm.base_fields['category'])
        self.assertIs(field1.widget.choices.field, field1)

    def test_modelchoicefield_22745(self):
        """
        #22745 -- Make sure that ModelChoiceField with RadioSelect widget
        doesn't produce unnecessary db queries when accessing its BoundField's
        attrs.
        """
        class ModelChoiceForm(forms.Form):
            category = forms.ModelChoiceField(Category.objects.all(), widget=forms.RadioSelect)
        form = ModelChoiceForm()
        field = form['category']  # BoundField
        template = Template('{{ field.name }}{{ field }}{{ field.help_text }}')
        # Rendering the bound field three ways must cost exactly one query.
        with self.assertNumQueries(1):
            template.render(Context({'field': field}))
class ModelMultipleChoiceFieldTests(TestCase):
    """Validation, query-count, and choice behavior of forms.ModelMultipleChoiceField."""

    def setUp(self):
        # Three categories shared by every test in this case.
        self.c1 = Category.objects.create(
            name="Entertainment", slug="entertainment", url="entertainment")
        self.c2 = Category.objects.create(
            name="It's a test", slug="its-test", url="test")
        self.c3 = Category.objects.create(
            name="Third", slug="third-test", url="third")

    def test_model_multiple_choice_field(self):
        """clean() accepts lists/tuples of pks (int or str) against live db state."""
        f = forms.ModelMultipleChoiceField(Category.objects.all())
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test"),
            (self.c3.pk, 'Third')])
        with self.assertRaises(ValidationError):
            f.clean(None)
        with self.assertRaises(ValidationError):
            f.clean([])
        self.assertQuerysetEqual(f.clean([self.c1.id]), ["Entertainment"])
        self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"])
        self.assertQuerysetEqual(f.clean([str(self.c1.id)]), ["Entertainment"])
        self.assertQuerysetEqual(f.clean([str(self.c1.id), str(self.c2.id)]),
            ["Entertainment", "It's a test"], ordered=False)
        # Mixed int/str pks and tuples are accepted too.
        self.assertQuerysetEqual(f.clean([self.c1.id, str(self.c2.id)]),
            ["Entertainment", "It's a test"], ordered=False)
        self.assertQuerysetEqual(f.clean((self.c1.id, str(self.c2.id))),
            ["Entertainment", "It's a test"], ordered=False)
        with self.assertRaises(ValidationError):
            f.clean(['100'])
        with self.assertRaises(ValidationError):
            f.clean('hello')
        with self.assertRaises(ValidationError):
            f.clean(['fail'])
        # Invalid types that require TypeError to be caught (#22808).
        with self.assertRaises(ValidationError):
            f.clean([['fail']])
        with self.assertRaises(ValidationError):
            f.clean([{'foo': 'bar'}])
        # Add a Category object *after* the ModelMultipleChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        # Note, we are using an id of 1006 here since tests that run before
        # this may create categories with primary keys up to 6. Use
        # a number that will not conflict.
        c6 = Category.objects.create(id=1006, name='Sixth', url='6th')
        self.assertQuerysetEqual(f.clean([c6.id]), ["Sixth"])
        # Delete a Category object *after* the ModelMultipleChoiceField has already been
        # instantiated. This proves clean() checks the database during clean() rather
        # than caching it at time of instantiation.
        Category.objects.get(url='6th').delete()
        with self.assertRaises(ValidationError):
            f.clean([c6.id])

    def test_model_multiple_choice_required_false(self):
        """With required=False an empty selection cleans to an empty queryset."""
        f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False)
        self.assertIsInstance(f.clean([]), EmptyQuerySet)
        self.assertIsInstance(f.clean(()), EmptyQuerySet)
        # '0' is not a valid pk, even mixed with valid ones.
        with self.assertRaises(ValidationError):
            f.clean(['0'])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c3.id), '0'])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c1.id), '0'])
        # queryset can be changed after the field is created.
        f.queryset = Category.objects.exclude(name='Third')
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test")])
        self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"])
        with self.assertRaises(ValidationError):
            f.clean([self.c3.id])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c2.id), str(self.c3.id)])
        # label_from_instance can be overridden to customize choice labels.
        f.queryset = Category.objects.all()
        f.label_from_instance = lambda obj: "multicategory " + str(obj)
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'multicategory Entertainment'),
            (self.c2.pk, "multicategory It's a test"),
            (self.c3.pk, 'multicategory Third')])

    def test_model_multiple_choice_number_of_queries(self):
        """
        Test that ModelMultipleChoiceField does O(1) queries instead of
        O(n) (#10156).
        """
        persons = [Writer.objects.create(name="Person %s" % i) for i in range(30)]
        f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
        self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]])

    def test_model_multiple_choice_run_validators(self):
        """
        Test that ModelMultipleChoiceField run given validators (#14144).
        """
        for i in range(30):
            Writer.objects.create(name="Person %s" % i)
        self._validator_run = False

        def my_validator(value):
            # Flag on the test case records that the validator was invoked.
            self._validator_run = True
        f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all(),
            validators=[my_validator])
        f.clean([p.pk for p in Writer.objects.all()[8:9]])
        self.assertTrue(self._validator_run)

    def test_model_multiple_choice_show_hidden_initial(self):
        """
        Test support of show_hidden_initial by ModelMultipleChoiceField.
        """
        class WriterForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(show_hidden_initial=True,
                queryset=Writer.objects.all())
        person1 = Writer.objects.create(name="Person 1")
        person2 = Writer.objects.create(name="Person 2")
        # Same selection as the hidden initial -> nothing changed.
        form = WriterForm(initial={'persons': [person1, person2]},
            data={'initial-persons': [str(person1.pk), str(person2.pk)],
                  'persons': [str(person1.pk), str(person2.pk)]})
        self.assertTrue(form.is_valid())
        self.assertFalse(form.has_changed())
        # Dropping one selection is detected as a change.
        form = WriterForm(initial={'persons': [person1, person2]},
            data={'initial-persons': [str(person1.pk), str(person2.pk)],
                  'persons': [str(person2.pk)]})
        self.assertTrue(form.is_valid())
        self.assertTrue(form.has_changed())

    def test_model_multiple_choice_field_22745(self):
        """
        #22745 -- Make sure that ModelMultipleChoiceField with
        CheckboxSelectMultiple widget doesn't produce unnecessary db queries
        when accessing its BoundField's attrs.
        """
        class ModelMultipleChoiceForm(forms.Form):
            categories = forms.ModelMultipleChoiceField(Category.objects.all(), widget=forms.CheckboxSelectMultiple)
        form = ModelMultipleChoiceForm()
        field = form['categories']  # BoundField
        template = Template('{{ field.name }}{{ field }}{{ field.help_text }}')
        with self.assertNumQueries(1):
            template.render(Context({'field': field}))

    def test_show_hidden_initial_changed_queries_efficiently(self):
        """has_changed() with show_hidden_initial costs a single query even for many pks."""
        class WriterForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(
                show_hidden_initial=True, queryset=Writer.objects.all())
        writers = (Writer.objects.create(name=str(x)) for x in range(0, 50))
        writer_pks = tuple(x.pk for x in writers)
        form = WriterForm(data={'initial-persons': writer_pks})
        with self.assertNumQueries(1):
            self.assertTrue(form.has_changed())

    def test_clean_does_deduplicate_values(self):
        """Repeated pks are deduplicated before hitting the database."""
        class WriterForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
        person1 = Writer.objects.create(name="Person 1")
        form = WriterForm(data={})
        # 50 copies of the same pk must collapse to one query parameter.
        queryset = form.fields['persons'].clean([str(person1.pk)] * 50)
        sql, params = queryset.query.sql_with_params()
        self.assertEqual(len(params), 1)
class ModelOneToOneFieldTests(TestCase):
    """ModelForm behavior for OneToOneField relations and model subclasses."""

    def test_modelform_onetoonefield(self):
        """A parent_link OneToOneField is excluded from the generated form."""
        class ImprovedArticleForm(forms.ModelForm):
            class Meta:
                model = ImprovedArticle
                fields = '__all__'

        class ImprovedArticleWithParentLinkForm(forms.ModelForm):
            class Meta:
                model = ImprovedArticleWithParentLink
                fields = '__all__'
        self.assertEqual(list(ImprovedArticleForm.base_fields), ['article'])
        # The parent-link model exposes no editable fields at all.
        self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])

    def test_modelform_subclassed_model(self):
        """A form for a subclassed model includes the inherited fields."""
        class BetterWriterForm(forms.ModelForm):
            class Meta:
                # BetterWriter model is a subclass of Writer with an additional `score` field
                model = BetterWriter
                fields = '__all__'
        bw = BetterWriter.objects.create(name='Joe Better', score=10)
        self.assertEqual(sorted(model_to_dict(bw)),
            ['id', 'name', 'score', 'writer_ptr'])
        form = BetterWriterForm({'name': 'Some Name', 'score': 12})
        self.assertTrue(form.is_valid())
        bw2 = form.save()
        self.assertEqual(bw2.score, 12)

    def test_onetoonefield(self):
        """A OneToOneField renders as a select and round-trips through save()."""
        class WriterProfileForm(forms.ModelForm):
            class Meta:
                # WriterProfile has a OneToOneField to Writer
                model = WriterProfile
                fields = '__all__'
        self.w_royko = Writer.objects.create(name='Mike Royko')
        self.w_woodward = Writer.objects.create(name='Bob Woodward')
        form = WriterProfileForm()
        self.assertHTMLEqual(
            form.as_p(),
            '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="" selected="selected">---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" min="0" /></p>''' % (
                self.w_woodward.pk, self.w_royko.pk,
            )
        )
        data = {
            'writer': six.text_type(self.w_woodward.pk),
            'age': '65',
        }
        form = WriterProfileForm(data)
        instance = form.save()
        self.assertEqual(six.text_type(instance), 'Bob Woodward is 65')
        # Re-rendering from the saved instance pre-selects the chosen writer.
        form = WriterProfileForm(instance=instance)
        self.assertHTMLEqual(
            form.as_p(),
            '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer">
<option value="">---------</option>
<option value="%s" selected="selected">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label> <input type="number" name="age" value="65" id="id_age" min="0" /></p>''' % (
                self.w_woodward.pk, self.w_royko.pk,
            )
        )

    def test_assignment_of_none(self):
        """Submitting an empty value clears a nullable OneToOneField."""
        class AuthorForm(forms.ModelForm):
            class Meta:
                model = Author
                fields = ['publication', 'full_name']
        publication = Publication.objects.create(title="Pravda",
            date_published=datetime.date(1991, 8, 22))
        author = Author.objects.create(publication=publication, full_name='John Doe')
        form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['publication'], None)
        author = form.save()
        # author object returned from form still retains original publication object
        # that's why we need to retrieve it from database again
        new_author = Author.objects.get(pk=author.pk)
        self.assertEqual(new_author.publication, None)

    def test_assignment_of_none_null_false(self):
        """An empty value fails validation when the OneToOneField is not nullable."""
        class AuthorForm(forms.ModelForm):
            class Meta:
                model = Author1
                fields = ['publication', 'full_name']
        publication = Publication.objects.create(title="Pravda",
            date_published=datetime.date(1991, 8, 22))
        author = Author1.objects.create(publication=publication, full_name='John Doe')
        form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author)
        self.assertFalse(form.is_valid())
class FileAndImageFieldTests(TestCase):
    """FileField/ImageField behavior through forms and ModelForms."""

    def test_clean_false(self):
        """
        If the ``clean`` method on a non-required FileField receives False as
        the data (meaning clear the field value), it returns False, regardless
        of the value of ``initial``.
        """
        f = forms.FileField(required=False)
        self.assertEqual(f.clean(False), False)
        self.assertEqual(f.clean(False, 'initial'), False)

    def test_clean_false_required(self):
        """
        If the ``clean`` method on a required FileField receives False as the
        data, it has the same effect as None: initial is returned if non-empty,
        otherwise the validation catches the lack of a required value.
        """
        f = forms.FileField(required=True)
        self.assertEqual(f.clean(False, 'initial'), 'initial')
        with self.assertRaises(ValidationError):
            f.clean(False)

    def test_full_clear(self):
        """
        Integration happy-path test that a model FileField can actually be set
        and cleared via a ModelForm.
        """
        class DocumentForm(forms.ModelForm):
            class Meta:
                model = Document
                fields = '__all__'
        form = DocumentForm()
        # No file yet, so no clear checkbox is rendered.
        self.assertIn('name="myfile"', six.text_type(form))
        self.assertNotIn('myfile-clear', six.text_type(form))
        form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
        self.assertTrue(form.is_valid())
        doc = form.save(commit=False)
        self.assertEqual(doc.myfile.name, 'something.txt')
        # Once a file is set, the clear checkbox appears ...
        form = DocumentForm(instance=doc)
        self.assertIn('myfile-clear', six.text_type(form))
        # ... and checking it removes the file from the instance.
        form = DocumentForm(instance=doc, data={'myfile-clear': 'true'})
        doc = form.save(commit=False)
        self.assertEqual(bool(doc.myfile), False)

    def test_clear_and_file_contradiction(self):
        """
        If the user submits a new file upload AND checks the clear checkbox,
        they get a validation error, and the bound redisplay of the form still
        includes the current file and the clear checkbox.
        """
        class DocumentForm(forms.ModelForm):
            class Meta:
                model = Document
                fields = '__all__'
        form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')})
        self.assertTrue(form.is_valid())
        doc = form.save(commit=False)
        form = DocumentForm(instance=doc,
            files={'myfile': SimpleUploadedFile('something.txt', b'content')},
            data={'myfile-clear': 'true'})
        # Idiomatic assertFalse (was assertTrue(not ...)) for a clearer failure message.
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['myfile'],
            ['Please either submit a file or check the clear checkbox, not both.'])
        rendered = six.text_type(form)
        self.assertIn('something.txt', rendered)
        self.assertIn('myfile-clear', rendered)

    def test_render_empty_file_field(self):
        """An unset FileField renders as a bare file input."""
        class DocumentForm(forms.ModelForm):
            class Meta:
                model = Document
                fields = '__all__'
        doc = Document.objects.create()
        form = DocumentForm(instance=doc)
        self.assertEqual(
            str(form['myfile']),
            '<input id="id_myfile" name="myfile" type="file" />'
        )

    def test_file_field_data(self):
        """Uploads, re-edits, and max_length handling for a model FileField."""
        # Test conditions when files is either not given or empty.
        f = TextFileForm(data={'description': 'Assistance'})
        self.assertFalse(f.is_valid())
        f = TextFileForm(data={'description': 'Assistance'}, files={})
        self.assertFalse(f.is_valid())
        # Upload a file and ensure it all works as expected.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')
        instance.file.delete()
        # If the previous file has been deleted, the file name can be reused
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test1.txt', b'hello world')})
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')
        # Check if the max_length attribute has been inherited from the model.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')})
        self.assertFalse(f.is_valid())
        # Edit an instance that already has the file defined in the model. This will not
        # save the file again, but leave it exactly as it is.
        f = TextFileForm(
            data={'description': 'Assistance'},
            instance=instance)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt')
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test1.txt')
        # Delete the current file since this is not done by Django.
        instance.file.delete()
        # Override the file by uploading a new one.
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test2.txt', b'hello world')}, instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test2.txt')
        # Delete the current file since this is not done by Django.
        instance.file.delete()
        instance.delete()

    def test_filefield_required_false(self):
        """A FileField with required=False accepts a missing upload and keeps existing files."""
        # Test the non-required FileField
        f = TextFileForm(data={'description': 'Assistance'})
        f.fields['file'].required = False
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, '')
        f = TextFileForm(
            data={'description': 'Assistance'},
            files={'file': SimpleUploadedFile('test3.txt', b'hello world')}, instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.file.name, 'tests/test3.txt')
        # Instance can be edited w/out re-uploading the file and existing file should be preserved.
        f = TextFileForm(
            data={'description': 'New Description'},
            instance=instance)
        f.fields['file'].required = False
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.description, 'New Description')
        self.assertEqual(instance.file.name, 'tests/test3.txt')
        # Delete the current file since this is not done by Django.
        instance.file.delete()
        instance.delete()

    def test_custom_file_field_save(self):
        """
        Regression for #11149: save_form_data should be called only once
        """
        class CFFForm(forms.ModelForm):
            class Meta:
                model = CustomFF
                fields = '__all__'
        # It's enough that the form saves without error -- the custom save routine will
        # generate an AssertionError if it is called more than once during save.
        form = CFFForm(data={'f': None})
        form.save()

    def test_file_field_multiple_save(self):
        """
        Simulate a file upload and check how many times Model.save() gets
        called. Test for bug #639.
        """
        class PhotoForm(forms.ModelForm):
            class Meta:
                model = Photo
                fields = '__all__'
        # Grab an image for testing.
        filename = os.path.join(os.path.dirname(upath(__file__)), "test.png")
        with open(filename, "rb") as fp:
            img = fp.read()
        # Fake a POST QueryDict and FILES MultiValueDict.
        data = {'title': 'Testing'}
        files = {"image": SimpleUploadedFile('test.png', img, 'image/png')}
        form = PhotoForm(data=data, files=files)
        p = form.save()
        try:
            # Check the savecount stored on the object (see the model).
            self.assertEqual(p._savecount, 1)
        finally:
            # Delete the "uploaded" file to avoid clogging /tmp.
            p = Photo.objects.get()
            p.image.delete(save=False)

    def test_file_path_field_blank(self):
        """
        Regression test for #8842: FilePathField(blank=True)
        """
        class FPForm(forms.ModelForm):
            class Meta:
                model = FilePathModel
                fields = '__all__'
        form = FPForm()
        names = [p[1] for p in form['path'].field.choices]
        names.sort()
        self.assertEqual(names, ['---------', '__init__.py', 'models.py', 'test_uuid.py', 'tests.py'])

    @skipUnless(test_images, "Pillow not installed")
    def test_image_field(self):
        """End-to-end ImageField handling: upload, edit, clear, and callable upload_to."""
        # ImageField and FileField are nearly identical, but they differ slightly when
        # it comes to validation. This specifically tests that #6302 is fixed for
        # both file fields and image fields.
        with open(os.path.join(os.path.dirname(upath(__file__)), "test.png"), 'rb') as fp:
            image_data = fp.read()
        with open(os.path.join(os.path.dirname(upath(__file__)), "test2.png"), 'rb') as fp:
            image_data2 = fp.read()
        f = ImageFileForm(
            data={'description': 'An image'},
            files={'image': SimpleUploadedFile('test.png', image_data)})
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)
        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        f = ImageFileForm(
            data={'description': 'An image'},
            files={'image': SimpleUploadedFile('test.png', image_data)})
        self.assertTrue(f.is_valid())
        self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile)
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)
        # Edit an instance that already has the (required) image defined in the model. This will not
        # save the image again, but leave it exactly as it is.
        f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png')
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test.png')
        self.assertEqual(instance.height, 16)
        self.assertEqual(instance.width, 16)
        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        # Override the file by uploading a new one.
        f = ImageFileForm(
            data={'description': 'Changed it'},
            files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test2.png')
        self.assertEqual(instance.height, 32)
        self.assertEqual(instance.width, 48)
        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        instance.delete()
        f = ImageFileForm(
            data={'description': 'Changed it'},
            files={'image': SimpleUploadedFile('test2.png', image_data2)})
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test2.png')
        self.assertEqual(instance.height, 32)
        self.assertEqual(instance.width, 48)
        # Delete the current file since this is not done by Django, but don't save
        # because the dimension fields are not null=True.
        instance.image.delete(save=False)
        instance.delete()
        # Test the non-required ImageField
        # Note: In Oracle, we expect a null ImageField to return '' instead of
        # None.
        if connection.features.interprets_empty_strings_as_nulls:
            expected_null_imagefield_repr = ''
        else:
            expected_null_imagefield_repr = None
        f = OptionalImageFileForm(data={'description': 'Test'})
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, expected_null_imagefield_repr)
        self.assertEqual(instance.width, None)
        self.assertEqual(instance.height, None)
        f = OptionalImageFileForm(
            data={'description': 'And a final one'},
            files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test3.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)
        # Editing the instance without re-uploading the image should not affect
        # the image or its width/height properties.
        f = OptionalImageFileForm(
            data={'description': 'New Description'},
            instance=instance)
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.description, 'New Description')
        self.assertEqual(instance.image.name, 'tests/test3.png')
        self.assertEqual(instance.width, 16)
        self.assertEqual(instance.height, 16)
        # Delete the current file since this is not done by Django.
        instance.image.delete()
        instance.delete()
        f = OptionalImageFileForm(
            data={'description': 'And a final one'},
            files={'image': SimpleUploadedFile('test4.png', image_data2)}
        )
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'tests/test4.png')
        self.assertEqual(instance.width, 48)
        self.assertEqual(instance.height, 32)
        instance.delete()
        # Test callable upload_to behavior that's dependent on the value of another field in the model
        f = ImageFileForm(
            data={'description': 'And a final one', 'path': 'foo'},
            files={'image': SimpleUploadedFile('test4.png', image_data)})
        self.assertTrue(f.is_valid())
        instance = f.save()
        self.assertEqual(instance.image.name, 'foo/test4.png')
        instance.delete()
class ModelOtherFieldTests(SimpleTestCase):
    """Validation tests for assorted model field types on ModelForms."""

    def test_big_integer_field(self):
        # Values at the signed 64-bit boundaries validate; one past either
        # boundary produces the corresponding range error message.
        bif = BigIntForm({'biggie': '-9223372036854775808'})
        self.assertTrue(bif.is_valid())
        bif = BigIntForm({'biggie': '-9223372036854775809'})
        self.assertFalse(bif.is_valid())
        self.assertEqual(
            bif.errors,
            {'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']}
        )
        bif = BigIntForm({'biggie': '9223372036854775807'})
        self.assertTrue(bif.is_valid())
        bif = BigIntForm({'biggie': '9223372036854775808'})
        self.assertFalse(bif.is_valid())
        self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']})

    def test_comma_separated_integer_field(self):
        # CommaSeparatedIntegerField accepts only digits separated by commas;
        # empty items, letters and decimal points are all rejected.
        class CommaSeparatedIntegerForm(forms.ModelForm):
            class Meta:
                model = CommaSeparatedInteger
                fields = '__all__'

        f = CommaSeparatedIntegerForm({'field': '1'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'field': '1'})
        f = CommaSeparatedIntegerForm({'field': '12'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'field': '12'})
        f = CommaSeparatedIntegerForm({'field': '1,2,3'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'field': '1,2,3'})
        f = CommaSeparatedIntegerForm({'field': '10,32'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'field': '10,32'})
        f = CommaSeparatedIntegerForm({'field': '1a,2'})
        self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
        f = CommaSeparatedIntegerForm({'field': ',,,,'})
        self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
        f = CommaSeparatedIntegerForm({'field': '1.2'})
        self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
        f = CommaSeparatedIntegerForm({'field': '1,a,2'})
        self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})
        f = CommaSeparatedIntegerForm({'field': '1,,2'})
        self.assertEqual(f.errors, {'field': ['Enter only digits separated by commas.']})

    def test_url_on_modelform(self):
        "Check basic URL field validation on model forms"
        class HomepageForm(forms.ModelForm):
            class Meta:
                model = Homepage
                fields = '__all__'

        # Malformed or incomplete URLs are rejected.
        self.assertFalse(HomepageForm({'url': 'foo'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://example'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://example.'}).is_valid())
        self.assertFalse(HomepageForm({'url': 'http://com.'}).is_valid())
        # Well-formed URLs (including localhost, ports, paths) are accepted.
        self.assertTrue(HomepageForm({'url': 'http://localhost'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://example.com'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com/test'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000/test'}).is_valid())
        self.assertTrue(HomepageForm({'url': 'http://example.com/foo/bar'}).is_valid())

    def test_modelform_non_editable_field(self):
        """
        When explicitly including a non-editable field in a ModelForm, the
        error message should be explicit.
        """
        # 'created', non-editable, is excluded by default
        self.assertNotIn('created', ArticleForm().fields)
        msg = "'created' cannot be specified for Article model form as it is a non-editable field"
        with self.assertRaisesMessage(FieldError, msg):
            class InvalidArticleForm(forms.ModelForm):
                class Meta:
                    model = Article
                    fields = ('headline', 'created')

    def test_http_prefixing(self):
        """
        If the http:// prefix is omitted on form input, the field adds it again. (Refs #13613)
        """
        class HomepageForm(forms.ModelForm):
            class Meta:
                model = Homepage
                fields = '__all__'

        form = HomepageForm({'url': 'example.com'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['url'], 'http://example.com')
        form = HomepageForm({'url': 'example.com/test'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['url'], 'http://example.com/test')
class OtherModelFormTests(TestCase):
    """Miscellaneous ModelForm behaviors: media, choices, to_field_name,
    excluded custom fields, M2M rendering and callable field defaults."""

    def test_media_on_modelform(self):
        # Similar to a regular Form class you can define custom media to be used on
        # the ModelForm.
        f = ModelFormWithMedia()
        self.assertHTMLEqual(
            six.text_type(f.media),
            '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/form/javascript"></script>'''
        )

    def test_choices_type(self):
        # Choices on CharField and IntegerField
        f = ArticleForm()
        with self.assertRaises(ValidationError):
            f.fields['status'].clean('42')
        f = ArticleStatusForm()
        with self.assertRaises(ValidationError):
            f.fields['status'].clean('z')

    def test_prefetch_related_queryset(self):
        """
        ModelChoiceField should respect a prefetch_related() on its queryset.
        """
        blue = Colour.objects.create(name='blue')
        red = Colour.objects.create(name='red')
        multicolor_item = ColourfulItem.objects.create()
        multicolor_item.colours.add(blue, red)
        red_item = ColourfulItem.objects.create()
        red_item.colours.add(red)

        class ColorModelChoiceField(forms.ModelChoiceField):
            def label_from_instance(self, obj):
                # Labels read the prefetched M2M; no per-row query expected.
                return ', '.join(c.name for c in obj.colours.all())

        field = ColorModelChoiceField(ColourfulItem.objects.prefetch_related('colours'))
        with self.assertNumQueries(4):  # would be 5 if prefetch is ignored
            self.assertEqual(tuple(field.choices), (
                ('', '---------'),
                (multicolor_item.pk, 'blue, red'),
                (red_item.pk, 'red'),
            ))

    def test_foreignkeys_which_use_to_field(self):
        # FK uses to_field='barcode', so choice values are barcodes, not pks.
        apple = Inventory.objects.create(barcode=86, name='Apple')
        Inventory.objects.create(barcode=22, name='Pear')
        core = Inventory.objects.create(barcode=87, name='Core', parent=apple)

        field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode')
        self.assertEqual(tuple(field.choices), (
            ('', '---------'),
            (86, 'Apple'),
            (87, 'Core'),
            (22, 'Pear')))

        form = InventoryForm(instance=core)
        self.assertHTMLEqual(six.text_type(form['parent']), '''<select name="parent" id="id_parent">
<option value="">---------</option>
<option value="86" selected="selected">Apple</option>
<option value="87">Core</option>
<option value="22">Pear</option>
</select>''')
        # Saving with a barcode value reassigns the FK target.
        data = model_to_dict(core)
        data['parent'] = '22'
        form = InventoryForm(data=data, instance=core)
        core = form.save()
        self.assertEqual(core.parent.name, 'Pear')

        class CategoryForm(forms.ModelForm):
            description = forms.CharField()

            class Meta:
                model = Category
                fields = ['description', 'url']

        self.assertEqual(list(CategoryForm.base_fields),
                         ['description', 'url'])
        self.assertHTMLEqual(
            six.text_type(CategoryForm()),
            '''<tr><th><label for="id_description">Description:</label></th>
<td><input type="text" name="description" id="id_description" /></td></tr>
<tr><th><label for="id_url">The URL:</label></th>
<td><input id="id_url" type="text" name="url" maxlength="40" /></td></tr>'''
        )
        # to_field_name should also work on ModelMultipleChoiceField ##################
        field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode')
        self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear')))
        self.assertQuerysetEqual(field.clean([86]), ['Apple'])

        form = SelectInventoryForm({'items': [87, 22]})
        self.assertTrue(form.is_valid())
        self.assertEqual(len(form.cleaned_data), 1)
        self.assertQuerysetEqual(form.cleaned_data['items'], ['Core', 'Pear'])

    def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self):
        # A model field whose formfield() returns None is dropped even when
        # named explicitly; only 'name' remains on the generated form.
        self.assertEqual(list(CustomFieldForExclusionForm.base_fields),
                         ['name'])
        self.assertHTMLEqual(
            six.text_type(CustomFieldForExclusionForm()),
            '''<tr><th><label for="id_name">Name:</label></th>
<td><input id="id_name" type="text" name="name" maxlength="10" /></td></tr>'''
        )

    def test_iterable_model_m2m(self):
        class ColourfulItemForm(forms.ModelForm):
            class Meta:
                model = ColourfulItem
                fields = '__all__'

        colour = Colour.objects.create(name='Blue')
        form = ColourfulItemForm()
        self.maxDiff = 1024
        self.assertHTMLEqual(
            form.as_p(),
            """<p><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="50" /></p>
<p><label for="id_colours">Colours:</label> <select multiple="multiple" name="colours" id="id_colours">
<option value="%(blue_pk)s">Blue</option>
</select></p>"""
            % {'blue_pk': colour.pk})

    def test_callable_field_default(self):
        # Fields with callable defaults render the evaluated default as the
        # initial value plus a hidden initial-* input used by has_changed().
        class PublicationDefaultsForm(forms.ModelForm):
            class Meta:
                model = PublicationDefaults
                fields = '__all__'

        self.maxDiff = 2000
        form = PublicationDefaultsForm()
        today_str = str(datetime.date.today())
        self.assertHTMLEqual(
            form.as_p(),
            """
            <p><label for="id_title">Title:</label> <input id="id_title" maxlength="30" name="title" type="text" /></p>
            <p><label for="id_date_published">Date published:</label>
            <input id="id_date_published" name="date_published" type="text" value="{0}" />
            <input id="initial-id_date_published" name="initial-date_published" type="hidden" value="{0}" /></p>
            <p><label for="id_mode">Mode:</label> <select id="id_mode" name="mode">
            <option value="di" selected="selected">direct</option>
            <option value="de">delayed</option></select>
            <input id="initial-id_mode" name="initial-mode" type="hidden" value="di" /></p>
            <p><label for="id_category">Category:</label> <select id="id_category" name="category">
            <option value="1">Games</option>
            <option value="2">Comics</option>
            <option value="3" selected="selected">Novel</option></select>
            <input id="initial-id_category" name="initial-category" type="hidden" value="3" />
            """.format(today_str)
        )
        # Submitting exactly the defaults counts as "not changed".
        empty_data = {
            'title': '',
            'date_published': today_str,
            'initial-date_published': today_str,
            'mode': 'di',
            'initial-mode': 'di',
            'category': '3',
            'initial-category': '3',
        }
        bound_form = PublicationDefaultsForm(empty_data)
        self.assertFalse(bound_form.has_changed())
class ModelFormCustomErrorTests(SimpleTestCase):
    """Custom error messages defined on the form and on the model."""

    def test_custom_error_messages(self):
        # 'name1' reports the form-level message, 'name2' the model-level one.
        data = {'name1': '@#$!!**@#$', 'name2': '@#$!!**@#$'}
        errors = CustomErrorMessageForm(data).errors
        self.assertHTMLEqual(
            str(errors['name1']),
            '<ul class="errorlist"><li>Form custom error message.</li></ul>'
        )
        self.assertHTMLEqual(
            str(errors['name2']),
            '<ul class="errorlist"><li>Model custom error message.</li></ul>'
        )

    def test_model_clean_error_messages(self):
        # Errors raised from Model.clean() end up on the bound form, keyed by
        # field (or '__all__' for non-field errors).
        data = {'name1': 'FORBIDDEN_VALUE', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertHTMLEqual(
            str(form.errors['name1']),
            '<ul class="errorlist"><li>Model.clean() error messages.</li></ul>'
        )
        data = {'name1': 'FORBIDDEN_VALUE2', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertHTMLEqual(
            str(form.errors['name1']),
            '<ul class="errorlist"><li>Model.clean() error messages (simpler syntax).</li></ul>'
        )
        data = {'name1': 'GLOBAL_ERROR', 'name2': 'ABC'}
        form = CustomErrorMessageForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['__all__'], ['Global error message.'])
class CustomCleanTests(TestCase):
    """Behavior of user-defined clean() overrides on ModelForms."""

    def test_override_clean(self):
        """
        Regression for #12596: Calling super from ModelForm.clean() should be
        optional.
        """
        class TripleFormWithCleanOverride(forms.ModelForm):
            class Meta:
                model = Triple
                fields = '__all__'

            def clean(self):
                # Deliberately does NOT call super().clean().
                if not self.cleaned_data['left'] == self.cleaned_data['right']:
                    raise forms.ValidationError('Left and right should be equal')
                return self.cleaned_data

        form = TripleFormWithCleanOverride({'left': 1, 'middle': 2, 'right': 1})
        self.assertTrue(form.is_valid())
        # form.instance.left will be None if the instance was not constructed
        # by form.full_clean().
        self.assertEqual(form.instance.left, 1)

    def test_model_form_clean_applies_to_model(self):
        """
        Regression test for #12960. Make sure the cleaned_data returned from
        ModelForm.clean() is applied to the model instance.
        """
        class CategoryForm(forms.ModelForm):
            class Meta:
                model = Category
                fields = '__all__'

            def clean(self):
                # Mutate cleaned_data; the saved instance must reflect it.
                self.cleaned_data['name'] = self.cleaned_data['name'].upper()
                return self.cleaned_data

        data = {'name': 'Test', 'slug': 'test', 'url': '/test'}
        form = CategoryForm(data)
        category = form.save()
        self.assertEqual(category.name, 'TEST')
class ModelFormInheritanceTests(SimpleTestCase):
    """Field resolution when ModelForms are mixed with Forms via MRO."""

    def test_form_subclass_inheritance(self):
        class Form(forms.Form):
            age = forms.IntegerField()

        class ModelForm(forms.ModelForm, Form):
            class Meta:
                model = Writer
                fields = '__all__'

        # Model fields come first, then inherited declared fields.
        self.assertEqual(list(ModelForm().fields.keys()), ['name', 'age'])

    def test_field_removal(self):
        class ModelForm(forms.ModelForm):
            class Meta:
                model = Writer
                fields = '__all__'

        class Mixin(object):
            # Setting a field name to None removes it from subclasses.
            age = None

        class Form(forms.Form):
            age = forms.IntegerField()

        class Form2(forms.Form):
            foo = forms.IntegerField()

        # Removal depends on MRO order: a None earlier in the MRO shadows a
        # field declared later; a field earlier in the MRO survives.
        self.assertEqual(list(ModelForm().fields.keys()), ['name'])
        self.assertEqual(list(type(str('NewForm'), (Mixin, Form), {})().fields.keys()), [])
        self.assertEqual(list(type(str('NewForm'), (Form2, Mixin, Form), {})().fields.keys()), ['foo'])
        self.assertEqual(list(type(str('NewForm'), (Mixin, ModelForm, Form), {})().fields.keys()), ['name'])
        self.assertEqual(list(type(str('NewForm'), (ModelForm, Mixin, Form), {})().fields.keys()), ['name'])
        self.assertEqual(list(type(str('NewForm'), (ModelForm, Form, Mixin), {})().fields.keys()), ['name', 'age'])
        self.assertEqual(list(type(str('NewForm'), (ModelForm, Form), {'age': None})().fields.keys()), ['name'])

    def test_field_removal_name_clashes(self):
        """Regression test for https://code.djangoproject.com/ticket/22510."""
        class MyForm(forms.ModelForm):
            media = forms.CharField()

            class Meta:
                model = Writer
                fields = '__all__'

        class SubForm(MyForm):
            media = None

        # The 'media' *field* is removed, but the Media class attribute that
        # every form exposes is still present on both classes.
        self.assertIn('media', MyForm().fields)
        self.assertNotIn('media', SubForm().fields)
        self.assertTrue(hasattr(MyForm, 'media'))
        self.assertTrue(hasattr(SubForm, 'media'))
class StumpJokeForm(forms.ModelForm):
    # Plain ModelForm over StumpJoke; exercised by LimitChoicesToTest.
    class Meta:
        model = StumpJoke
        fields = '__all__'
class CustomFieldWithQuerysetButNoLimitChoicesTo(forms.Field):
    # Non-model field exposing a `queryset` attribute without a
    # `limit_choices_to`; regression fixture for #23795.
    queryset = 42
class StumpJokeWithCustomFieldForm(forms.ModelForm):
    # Declared (non-model) field only; the model contributes no fields.
    custom = CustomFieldWithQuerysetButNoLimitChoicesTo()

    class Meta:
        model = StumpJoke
        fields = ()  # We don't need any fields from the model
class LimitChoicesToTest(TestCase):
    """
    Tests the functionality of ``limit_choices_to``.
    """
    def setUp(self):
        # threepwood's last_action is in the future, marley's in the past;
        # the callable limit_choices_to is expected to keep only threepwood.
        self.threepwood = Character.objects.create(
            username='threepwood',
            last_action=datetime.datetime.today() + datetime.timedelta(days=1),
        )
        self.marley = Character.objects.create(
            username='marley',
            last_action=datetime.datetime.today() - datetime.timedelta(days=1),
        )

    def test_limit_choices_to_callable_for_fk_rel(self):
        """
        A ForeignKey relation can use ``limit_choices_to`` as a callable, re #2554.
        """
        stumpjokeform = StumpJokeForm()
        self.assertIn(self.threepwood, stumpjokeform.fields['most_recently_fooled'].queryset)
        self.assertNotIn(self.marley, stumpjokeform.fields['most_recently_fooled'].queryset)

    def test_limit_choices_to_callable_for_m2m_rel(self):
        """
        A ManyToMany relation can use ``limit_choices_to`` as a callable, re #2554.
        """
        stumpjokeform = StumpJokeForm()
        self.assertIn(self.threepwood, stumpjokeform.fields['has_fooled_today'].queryset)
        self.assertNotIn(self.marley, stumpjokeform.fields['has_fooled_today'].queryset)

    def test_custom_field_with_queryset_but_no_limit_choices_to(self):
        """
        Regression test for #23795: Make sure a custom field with a `queryset`
        attribute but no `limit_choices_to` still works.
        """
        f = StumpJokeWithCustomFieldForm()
        self.assertEqual(f.fields['custom'].queryset, 42)
class FormFieldCallbackTests(SimpleTestCase):
    """modelform_factory() widget handling and formfield_callback support."""

    def test_baseform_with_widgets_in_meta(self):
        """Regression for #13095: Using base forms with widgets defined in Meta should not raise errors."""
        widget = forms.Textarea()

        class BaseForm(forms.ModelForm):
            class Meta:
                model = Person
                widgets = {'name': widget}
                fields = "__all__"

        Form = modelform_factory(Person, form=BaseForm)
        self.assertIs(Form.base_fields['name'].widget, widget)

    def test_factory_with_widget_argument(self):
        """ Regression for #15315: modelform_factory should accept widgets
        argument
        """
        widget = forms.Textarea()
        # Without a widget should not set the widget to textarea
        Form = modelform_factory(Person, fields="__all__")
        self.assertNotEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)
        # With a widget argument, the widget should be set to textarea
        Form = modelform_factory(Person, fields="__all__", widgets={'name': widget})
        self.assertEqual(Form.base_fields['name'].widget.__class__, forms.Textarea)

    def test_modelform_factory_without_fields(self):
        """ Regression for #19733 """
        message = (
            "Calling modelform_factory without defining 'fields' or 'exclude' "
            "explicitly is prohibited."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, message):
            modelform_factory(Person)

    def test_modelform_factory_with_all_fields(self):
        """ Regression for #19733 """
        form = modelform_factory(Person, fields="__all__")
        self.assertEqual(list(form.base_fields), ["name"])

    def test_custom_callback(self):
        """Test that a custom formfield_callback is used if provided"""
        callback_args = []

        def callback(db_field, **kwargs):
            # Record every (field, kwargs) pair the factory passes through.
            callback_args.append((db_field, kwargs))
            return db_field.formfield(**kwargs)

        widget = forms.Textarea()

        class BaseForm(forms.ModelForm):
            class Meta:
                model = Person
                widgets = {'name': widget}
                fields = "__all__"

        modelform_factory(Person, form=BaseForm, formfield_callback=callback)
        id_field, name_field = Person._meta.fields
        # Meta.widgets entries are forwarded to the callback as kwargs.
        self.assertEqual(callback_args,
                         [(id_field, {}), (name_field, {'widget': widget})])

    def test_bad_callback(self):
        # A bad callback provided by user still gives an error
        with self.assertRaises(TypeError):
            modelform_factory(Person, fields="__all__", formfield_callback='not a function or callable')

    def test_inherit_after_custom_callback(self):
        def callback(db_field, **kwargs):
            if isinstance(db_field, models.CharField):
                return forms.CharField(widget=forms.Textarea)
            return db_field.formfield(**kwargs)

        class BaseForm(forms.ModelForm):
            class Meta:
                model = Person
                fields = '__all__'

        NewForm = modelform_factory(Person, form=BaseForm, formfield_callback=callback)

        class InheritedForm(NewForm):
            pass

        # Subclassing must preserve the widgets chosen by the callback.
        for name in NewForm.base_fields.keys():
            self.assertEqual(
                type(InheritedForm.base_fields[name].widget),
                type(NewForm.base_fields[name].widget)
            )
class LocalizedModelFormTest(TestCase):
    """Meta.localized_fields: per-field, '__all__', and invalid values."""

    def test_model_form_applies_localize_to_some_fields(self):
        class PartiallyLocalizedTripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                localized_fields = ('left', 'right',)
                fields = '__all__'

        f = PartiallyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
        self.assertTrue(f.is_valid())
        # Only the listed fields get localize=True.
        self.assertTrue(f.fields['left'].localize)
        self.assertFalse(f.fields['middle'].localize)
        self.assertTrue(f.fields['right'].localize)

    def test_model_form_applies_localize_to_all_fields(self):
        class FullyLocalizedTripleForm(forms.ModelForm):
            class Meta:
                model = Triple
                localized_fields = '__all__'
                fields = '__all__'

        f = FullyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10})
        self.assertTrue(f.is_valid())
        self.assertTrue(f.fields['left'].localize)
        self.assertTrue(f.fields['middle'].localize)
        self.assertTrue(f.fields['right'].localize)

    def test_model_form_refuses_arbitrary_string(self):
        # Any string other than '__all__' must raise at class creation time.
        with self.assertRaises(TypeError):
            class BrokenLocalizedTripleForm(forms.ModelForm):
                class Meta:
                    model = Triple
                    localized_fields = "foo"
class CustomMetaclass(ModelFormMetaclass):
    # Metaclass that discards every field ModelFormMetaclass generated.
    def __new__(cls, name, bases, attrs):
        new = super(CustomMetaclass, cls).__new__(cls, name, bases, attrs)
        new.base_fields = {}
        return new
class CustomMetaclassForm(six.with_metaclass(CustomMetaclass, forms.ModelForm)):
    # ModelForm built with the field-stripping metaclass above.
    pass
class CustomMetaclassTestCase(SimpleTestCase):
    def test_modelform_factory_metaclass(self):
        # modelform_factory must honor the custom metaclass of the base form.
        new_cls = modelform_factory(Person, fields="__all__", form=CustomMetaclassForm)
        self.assertEqual(new_cls.base_fields, {})
class StrictAssignmentTests(TestCase):
    """
    Should a model do anything special with __setattr__() or descriptors which
    raise a ValidationError, a model form should catch the error (#24706).
    """

    def test_setattr_raises_validation_error_field_specific(self):
        """
        A model ValidationError using the dict form should put the error
        message into the correct key of form.errors.
        """
        form_class = modelform_factory(model=StrictAssignmentFieldSpecific, fields=['title'])
        form = form_class(data={'title': 'testing setattr'}, files=None)
        # This line turns on the ValidationError; it avoids the model erroring
        # when its own __init__() is called when creating form.instance.
        form.instance._should_error = True
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {
            'title': ['Cannot set attribute', 'This field cannot be blank.']
        })

    def test_setattr_raises_validation_error_non_field(self):
        """
        A model ValidationError not using the dict form should put the error
        message into __all__ (i.e. non-field errors) on the form.
        """
        form_class = modelform_factory(model=StrictAssignmentAll, fields=['title'])
        form = form_class(data={'title': 'testing setattr'}, files=None)
        # This line turns on the ValidationError; it avoids the model erroring
        # when its own __init__() is called when creating form.instance.
        form.instance._should_error = True
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {
            '__all__': ['Cannot set attribute'],
            'title': ['This field cannot be blank.']
        })
| gitaarik/django | tests/model_forms/tests.py | Python | bsd-3-clause | 116,011 |
"""Test Home Assistant template helper methods."""
from datetime import datetime
import unittest
import random
from unittest.mock import patch
from homeassistant.components import group
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
from homeassistant.util.unit_system import UnitSystem
from homeassistant.const import (
LENGTH_METERS,
TEMP_CELSIUS,
MASS_GRAMS,
VOLUME_LITERS,
MATCH_ALL,
)
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant
class TestHelpersTemplate(unittest.TestCase):
"""Test the Template."""
    # pylint: disable=invalid-name
    def setUp(self):
        """Setup the tests."""
        self.hass = get_test_home_assistant()
        # Install a fixed custom unit system so unit-dependent template
        # output is deterministic regardless of the host configuration.
        self.hass.config.units = UnitSystem('custom', TEMP_CELSIUS,
                                            LENGTH_METERS, VOLUME_LITERS,
                                            MASS_GRAMS)
    # pylint: disable=invalid-name
    def tearDown(self):
        """Stop everything that was started in setUp."""
        self.hass.stop()
def test_referring_states_by_entity_id(self):
"""Test referring states by entity id."""
self.hass.states.set('test.object', 'happy')
self.assertEqual(
'happy',
template.Template(
'{{ states.test.object.state }}', self.hass).render())
def test_iterating_all_states(self):
"""Test iterating all states."""
self.hass.states.set('test.object', 'happy')
self.hass.states.set('sensor.temperature', 10)
self.assertEqual(
'10happy',
template.Template(
'{% for state in states %}{{ state.state }}{% endfor %}',
self.hass).render())
def test_iterating_domain_states(self):
"""Test iterating domain states."""
self.hass.states.set('test.object', 'happy')
self.hass.states.set('sensor.back_door', 'open')
self.hass.states.set('sensor.temperature', 10)
self.assertEqual(
'open10',
template.Template("""
{% for state in states.sensor %}{{ state.state }}{% endfor %}
""", self.hass).render())
def test_float(self):
"""Test float."""
self.hass.states.set('sensor.temperature', '12')
self.assertEqual(
'12.0',
template.Template(
'{{ float(states.sensor.temperature.state) }}',
self.hass).render())
self.assertEqual(
'True',
template.Template(
'{{ float(states.sensor.temperature.state) > 11 }}',
self.hass).render())
def test_rounding_value(self):
"""Test rounding value."""
self.hass.states.set('sensor.temperature', 12.78)
self.assertEqual(
'12.8',
template.Template(
'{{ states.sensor.temperature.state | round(1) }}',
self.hass).render())
self.assertEqual(
'128',
template.Template(
'{{ states.sensor.temperature.state | multiply(10) | round }}',
self.hass).render())
def test_rounding_value_get_original_value_on_error(self):
"""Test rounding value get original value on error."""
self.assertEqual(
'None',
template.Template('{{ None | round }}', self.hass).render())
self.assertEqual(
'no_number',
template.Template(
'{{ "no_number" | round }}', self.hass).render())
def test_multiply(self):
"""Test multiply."""
tests = {
None: 'None',
10: '100',
'"abcd"': 'abcd'
}
for inp, out in tests.items():
self.assertEqual(
out,
template.Template('{{ %s | multiply(10) | round }}' % inp,
self.hass).render())
    def test_strptime(self):
        """Test the parse timestamp method."""
        # Each entry is (input, format, expected rendering). A None
        # expectation means strptime should succeed and the rendered output
        # equals str(datetime.strptime(input, format)); a non-None
        # expectation is the raw value the template returns when parsing
        # does not apply (numeric input or unparseable input).
        tests = [
            ('2016-10-19 15:22:05.588122 UTC',
             '%Y-%m-%d %H:%M:%S.%f %Z', None),
            ('2016-10-19 15:22:05.588122+0100',
             '%Y-%m-%d %H:%M:%S.%f%z', None),
            ('2016-10-19 15:22:05.588122',
             '%Y-%m-%d %H:%M:%S.%f', None),
            ('2016-10-19', '%Y-%m-%d', None),
            ('2016', '%Y', None),
            ('15:22:05', '%H:%M:%S', None),
            ('1469119144', '%Y', '1469119144'),
            ('invalid', '%Y', 'invalid')
        ]
        for inp, fmt, expected in tests:
            if expected is None:
                expected = datetime.strptime(inp, fmt)
            temp = '{{ strptime(\'%s\', \'%s\') }}' % (inp, fmt)
            self.assertEqual(
                str(expected),
                template.Template(temp, self.hass).render())
def test_timestamp_custom(self):
"""Test the timestamps to custom filter."""
now = dt_util.utcnow()
tests = [
(None, None, None, 'None'),
(1469119144, None, True, '2016-07-21 16:39:04'),
(1469119144, '%Y', True, '2016'),
(1469119144, 'invalid', True, 'invalid'),
(dt_util.as_timestamp(now), None, False,
now.strftime('%Y-%m-%d %H:%M:%S'))
]
for inp, fmt, local, out in tests:
if fmt:
fil = 'timestamp_custom(\'{}\')'.format(fmt)
elif fmt and local:
fil = 'timestamp_custom(\'{0}\', {1})'.format(fmt, local)
else:
fil = 'timestamp_custom'
self.assertEqual(
out,
template.Template('{{ %s | %s }}' % (inp, fil),
self.hass).render())
def test_timestamp_local(self):
"""Test the timestamps to local filter."""
tests = {
None: 'None',
1469119144: '2016-07-21 16:39:04',
}
for inp, out in tests.items():
self.assertEqual(
out,
template.Template('{{ %s | timestamp_local }}' % inp,
self.hass).render())
def test_min(self):
"""Test the min filter."""
self.assertEqual(
'1',
template.Template('{{ [1, 2, 3] | min }}',
self.hass).render())
def test_max(self):
"""Test the max filter."""
self.assertEqual(
'3',
template.Template('{{ [1, 2, 3] | max }}',
self.hass).render())
    def test_timestamp_utc(self):
        """Test the timestamp_utc filter (docstring previously said local)."""
        now = dt_util.utcnow()
        tests = {
            None: 'None',
            1469119144: '2016-07-21 16:39:04',
            dt_util.as_timestamp(now):
                now.strftime('%Y-%m-%d %H:%M:%S')
        }
        for inp, out in tests.items():
            self.assertEqual(
                out,
                template.Template('{{ %s | timestamp_utc }}' % inp,
                                  self.hass).render())
    def test_as_timestamp(self):
        """Test the as_timestamp function."""
        # Unparseable input renders as 'None'.
        self.assertEqual("None",
                         template.Template('{{ as_timestamp("invalid") }}',
                                           self.hass).render())
        # A state object that does not exist also yields 'None'.
        self.hass.mock = None
        self.assertEqual("None",
                         template.Template('{{ as_timestamp(states.mock) }}',
                                           self.hass).render())

        # A datetime produced by strptime converts to a Unix timestamp.
        tpl = '{{ as_timestamp(strptime("2024-02-03T09:10:24+0000", ' \
            '"%Y-%m-%dT%H:%M:%S%z")) }}'
        self.assertEqual("1706951424.0",
                         template.Template(tpl, self.hass).render())
    @patch.object(random, 'choice')
    def test_random_every_time(self, test_choice):
        """Ensure the random filter runs every time, not just once."""
        tpl = template.Template('{{ [1,2] | random }}', self.hass)
        # If the filter result were cached at compile time, the second
        # render would still return 'foo'.
        test_choice.return_value = 'foo'
        self.assertEqual('foo', tpl.render())
        test_choice.return_value = 'bar'
        self.assertEqual('bar', tpl.render())
def test_passing_vars_as_keywords(self):
"""Test passing variables as keywords."""
self.assertEqual(
'127',
template.Template('{{ hello }}', self.hass).render(hello=127))
def test_passing_vars_as_vars(self):
"""Test passing variables as variables."""
self.assertEqual(
'127',
template.Template('{{ hello }}', self.hass).render({'hello': 127}))
def test_render_with_possible_json_value_with_valid_json(self):
"""Render with possible JSON value with valid JSON."""
tpl = template.Template('{{ value_json.hello }}', self.hass)
self.assertEqual(
'world',
tpl.render_with_possible_json_value('{"hello": "world"}'))
def test_render_with_possible_json_value_with_invalid_json(self):
"""Render with possible JSON value with invalid JSON."""
tpl = template.Template('{{ value_json }}', self.hass)
self.assertEqual(
'',
tpl.render_with_possible_json_value('{ I AM NOT JSON }'))
def test_render_with_possible_json_value_with_template_error_value(self):
"""Render with possible JSON value with template error value."""
tpl = template.Template('{{ non_existing.variable }}', self.hass)
self.assertEqual(
'-',
tpl.render_with_possible_json_value('hello', '-'))
def test_render_with_possible_json_value_with_missing_json_value(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.goodbye }}', self.hass)
self.assertEqual(
'',
tpl.render_with_possible_json_value('{"hello": "world"}'))
def test_render_with_possible_json_value_valid_with_is_defined(self):
"""Render with possible JSON value with known JSON object."""
tpl = template.Template('{{ value_json.hello|is_defined }}', self.hass)
self.assertEqual(
'world',
tpl.render_with_possible_json_value('{"hello": "world"}'))
def test_render_with_possible_json_value_undefined_json(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.bye|is_defined }}', self.hass)
self.assertEqual(
'{"hello": "world"}',
tpl.render_with_possible_json_value('{"hello": "world"}'))
def test_render_with_possible_json_value_undefined_json_error_value(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.bye|is_defined }}', self.hass)
self.assertEqual(
'',
tpl.render_with_possible_json_value('{"hello": "world"}', ''))
def test_raise_exception_on_error(self):
"""Test raising an exception on error."""
with self.assertRaises(TemplateError):
template.Template('{{ invalid_syntax').ensure_valid()
def test_if_state_exists(self):
"""Test if state exists works."""
self.hass.states.set('test.object', 'available')
tpl = template.Template(
'{% if states.test.object %}exists{% else %}not exists{% endif %}',
self.hass)
self.assertEqual('exists', tpl.render())
    def test_is_state(self):
        """Test is_state method."""
        self.hass.states.set('test.object', 'available')
        tpl = template.Template("""
{% if is_state("test.object", "available") %}yes{% else %}no{% endif %}
            """, self.hass)
        self.assertEqual('yes', tpl.render())

        # A non-existent entity simply does not match.
        tpl = template.Template("""
{{ is_state("test.noobject", "available") }}
            """, self.hass)
        self.assertEqual('False', tpl.render())
    def test_is_state_attr(self):
        """Test is_state_attr method."""
        self.hass.states.set('test.object', 'available', {'mode': 'on'})
        tpl = template.Template("""
{% if is_state_attr("test.object", "mode", "on") %}yes{% else %}no{% endif %}
            """, self.hass)
        self.assertEqual('yes', tpl.render())

        # A non-existent entity simply does not match.
        tpl = template.Template("""
{{ is_state_attr("test.noobject", "mode", "on") }}
            """, self.hass)
        self.assertEqual('False', tpl.render())
def test_states_function(self):
"""Test using states as a function."""
self.hass.states.set('test.object', 'available')
tpl = template.Template('{{ states("test.object") }}', self.hass)
self.assertEqual('available', tpl.render())
tpl2 = template.Template('{{ states("test.object2") }}', self.hass)
self.assertEqual('unknown', tpl2.render())
    @patch('homeassistant.helpers.template.TemplateEnvironment.'
           'is_safe_callable', return_value=True)
    def test_now(self, mock_is_safe):
        """Test now method."""
        now = dt_util.now()
        # Freeze now() in the template globals so both sides of the
        # comparison see the same instant.
        with patch.dict(template.ENV.globals, {'now': lambda: now}):
            self.assertEqual(
                now.isoformat(),
                template.Template('{{ now().isoformat() }}',
                                  self.hass).render())
    @patch('homeassistant.helpers.template.TemplateEnvironment.'
           'is_safe_callable', return_value=True)
    def test_utcnow(self, mock_is_safe):
        """Test utcnow method."""
        now = dt_util.utcnow()
        # Freeze utcnow() in the template globals so both sides of the
        # comparison see the same instant.
        with patch.dict(template.ENV.globals, {'utcnow': lambda: now}):
            self.assertEqual(
                now.isoformat(),
                template.Template('{{ utcnow().isoformat() }}',
                                  self.hass).render())
def test_distance_function_with_1_state(self):
"""Test distance function with 1 state."""
self.hass.states.set('test.object', 'happy', {
'latitude': 32.87336,
'longitude': -117.22943,
})
tpl = template.Template('{{ distance(states.test.object) | round }}',
self.hass)
self.assertEqual('187', tpl.render())
def test_distance_function_with_2_states(self):
    """Test distance function with 2 states."""
    self.hass.states.set('test.object', 'happy', {
        'latitude': 32.87336,
        'longitude': -117.22943,
    })
    self.hass.states.set('test.object_2', 'happy', {
        'latitude': self.hass.config.latitude,
        'longitude': self.hass.config.longitude,
    })
    rendered = template.Template(
        '{{ distance(states.test.object, states.test.object_2) | round }}',
        self.hass).render()
    self.assertEqual('187', rendered)
def test_distance_function_with_1_coord(self):
    """Test distance function with 1 coord."""
    rendered = template.Template(
        '{{ distance("32.87336", "-117.22943") | round }}',
        self.hass).render()
    self.assertEqual('187', rendered)
def test_distance_function_with_2_coords(self):
    """Test distance function with 2 coords."""
    tpl = template.Template(
        '{{ distance("32.87336", "-117.22943", %s, %s) | round }}'
        % (self.hass.config.latitude, self.hass.config.longitude),
        self.hass)
    self.assertEqual('187', tpl.render())
def test_distance_function_with_1_state_1_coord(self):
    """Test distance function with 1 state 1 coord."""
    self.hass.states.set('test.object_2', 'happy', {
        'latitude': self.hass.config.latitude,
        'longitude': self.hass.config.longitude,
    })
    # Coordinates first, state second ...
    rendered = template.Template(
        '{{ distance("32.87336", "-117.22943", states.test.object_2) '
        '| round }}', self.hass).render()
    self.assertEqual('187', rendered)
    # ... and the reverse argument order gives the same result.
    rendered = template.Template(
        '{{ distance(states.test.object_2, "32.87336", "-117.22943") '
        '| round }}', self.hass).render()
    self.assertEqual('187', rendered)
def test_distance_function_return_None_if_invalid_state(self):
    """Test distance function return None if invalid state."""
    # Entity has a latitude but no longitude -> no usable location.
    self.hass.states.set('test.object_2', 'happy', {
        'latitude': 10,
    })
    rendered = template.Template(
        '{{ distance(states.test.object_2) | round }}',
        self.hass).render()
    self.assertEqual('None', rendered)
def test_distance_function_return_None_if_invalid_coord(self):
    """Test distance function return None if invalid coord."""
    # A non-numeric coordinate and a lone coordinate both yield None.
    for bad in ('{{ distance("123", "abc") }}', '{{ distance("123") }}'):
        self.assertEqual(
            'None', template.Template(bad, self.hass).render())
    self.hass.states.set('test.object_2', 'happy', {
        'latitude': self.hass.config.latitude,
        'longitude': self.hass.config.longitude,
    })
    # NOTE(review): 'states.test_object_2' (underscore) does not match
    # the 'test.object_2' entity set above; the result is None either
    # way since "123" alone is not a valid coordinate -- confirm the
    # entity id was meant to be states.test.object_2.
    rendered = template.Template(
        '{{ distance("123", states.test_object_2) }}',
        self.hass).render()
    self.assertEqual('None', rendered)
def test_closest_function_home_vs_domain(self):
    """Test closest function home vs domain."""
    self.hass.states.set('test_domain.object', 'happy', {
        'latitude': self.hass.config.latitude + 0.1,
        'longitude': self.hass.config.longitude + 0.1,
    })
    # Nearer to home, but outside the requested domain -> ignored.
    self.hass.states.set('not_test_domain.but_closer', 'happy', {
        'latitude': self.hass.config.latitude,
        'longitude': self.hass.config.longitude,
    })
    rendered = template.Template(
        '{{ closest(states.test_domain).entity_id }}',
        self.hass).render()
    self.assertEqual('test_domain.object', rendered)
def test_closest_function_home_vs_all_states(self):
    """Test closest function home vs all states."""
    self.hass.states.set('test_domain.object', 'happy', {
        'latitude': self.hass.config.latitude + 0.1,
        'longitude': self.hass.config.longitude + 0.1,
    })
    self.hass.states.set('test_domain_2.and_closer', 'happy', {
        'latitude': self.hass.config.latitude,
        'longitude': self.hass.config.longitude,
    })
    # With the full states collection, the nearest of all entities wins.
    rendered = template.Template(
        '{{ closest(states).entity_id }}', self.hass).render()
    self.assertEqual('test_domain_2.and_closer', rendered)
def test_closest_function_home_vs_group_entity_id(self):
    """Test closest function home vs group entity id."""
    self.hass.states.set('test_domain.object', 'happy', {
        'latitude': self.hass.config.latitude + 0.1,
        'longitude': self.hass.config.longitude + 0.1,
    })
    # Closer to home, but not a member of the group below.
    self.hass.states.set('not_in_group.but_closer', 'happy', {
        'latitude': self.hass.config.latitude,
        'longitude': self.hass.config.longitude,
    })
    group.Group.create_group(
        self.hass, 'location group', ['test_domain.object'])
    rendered = template.Template(
        '{{ closest("group.location_group").entity_id }}',
        self.hass).render()
    self.assertEqual('test_domain.object', rendered)
def test_closest_function_home_vs_group_state(self):
    """Test closest function home vs group state."""
    self.hass.states.set('test_domain.object', 'happy', {
        'latitude': self.hass.config.latitude + 0.1,
        'longitude': self.hass.config.longitude + 0.1,
    })
    # Closer to home, but not a member of the group below.
    self.hass.states.set('not_in_group.but_closer', 'happy', {
        'latitude': self.hass.config.latitude,
        'longitude': self.hass.config.longitude,
    })
    group.Group.create_group(
        self.hass, 'location group', ['test_domain.object'])
    rendered = template.Template(
        '{{ closest(states.group.location_group).entity_id }}',
        self.hass).render()
    self.assertEqual('test_domain.object', rendered)
def test_closest_function_to_coord(self):
    """Test closest function to coord."""
    self.hass.states.set('test_domain.closest_home', 'happy', {
        'latitude': self.hass.config.latitude + 0.1,
        'longitude': self.hass.config.longitude + 0.1,
    })
    self.hass.states.set('test_domain.closest_zone', 'happy', {
        'latitude': self.hass.config.latitude + 0.2,
        'longitude': self.hass.config.longitude + 0.2,
    })
    self.hass.states.set('zone.far_away', 'zoning', {
        'latitude': self.hass.config.latitude + 0.3,
        'longitude': self.hass.config.longitude + 0.3,
    })
    # Measure from the far-away coordinates rather than from home.
    rendered = template.Template(
        '{{ closest("%s", %s, states.test_domain).entity_id }}'
        % (self.hass.config.latitude + 0.3,
           self.hass.config.longitude + 0.3), self.hass).render()
    self.assertEqual('test_domain.closest_zone', rendered)
def test_closest_function_to_entity_id(self):
    """Test closest function to entity id."""
    self.hass.states.set('test_domain.closest_home', 'happy', {
        'latitude': self.hass.config.latitude + 0.1,
        'longitude': self.hass.config.longitude + 0.1,
    })
    self.hass.states.set('test_domain.closest_zone', 'happy', {
        'latitude': self.hass.config.latitude + 0.2,
        'longitude': self.hass.config.longitude + 0.2,
    })
    self.hass.states.set('zone.far_away', 'zoning', {
        'latitude': self.hass.config.latitude + 0.3,
        'longitude': self.hass.config.longitude + 0.3,
    })
    # Measure from the zone entity (referenced by id string).
    rendered = template.Template(
        '{{ closest("zone.far_away", '
        'states.test_domain).entity_id }}', self.hass).render()
    self.assertEqual('test_domain.closest_zone', rendered)
def test_closest_function_to_state(self):
    """Test closest function to state."""
    self.hass.states.set('test_domain.closest_home', 'happy', {
        'latitude': self.hass.config.latitude + 0.1,
        'longitude': self.hass.config.longitude + 0.1,
    })
    self.hass.states.set('test_domain.closest_zone', 'happy', {
        'latitude': self.hass.config.latitude + 0.2,
        'longitude': self.hass.config.longitude + 0.2,
    })
    self.hass.states.set('zone.far_away', 'zoning', {
        'latitude': self.hass.config.latitude + 0.3,
        'longitude': self.hass.config.longitude + 0.3,
    })
    # Measure from the zone entity (referenced as a state object).
    rendered = template.Template(
        '{{ closest(states.zone.far_away, '
        'states.test_domain).entity_id }}', self.hass).render()
    self.assertEqual('test_domain.closest_zone', rendered)
def test_closest_function_invalid_state(self):
    """Test closest function invalid state."""
    self.hass.states.set('test_domain.closest_home', 'happy', {
        'latitude': self.hass.config.latitude + 0.1,
        'longitude': self.hass.config.longitude + 0.1,
    })
    # Both a missing state object and a missing entity-id string make
    # closest() render as None.
    for state in ('states.zone.non_existing', '"zone.non_existing"'):
        rendered = template.Template(
            '{{ closest(%s, states) }}' % state, self.hass).render()
        self.assertEqual('None', rendered)
def test_closest_function_state_with_invalid_location(self):
    """Test closest function state with invalid location."""
    # A non-numeric latitude makes the reference point unusable.
    self.hass.states.set('test_domain.closest_home', 'happy', {
        'latitude': 'invalid latitude',
        'longitude': self.hass.config.longitude + 0.1,
    })
    rendered = template.Template(
        '{{ closest(states.test_domain.closest_home, '
        'states) }}', self.hass).render()
    self.assertEqual('None', rendered)
def test_closest_function_invalid_coordinates(self):
    """Test closest function invalid coordinates."""
    self.hass.states.set('test_domain.closest_home', 'happy', {
        'latitude': self.hass.config.latitude + 0.1,
        'longitude': self.hass.config.longitude + 0.1,
    })
    rendered = template.Template(
        '{{ closest("invalid", "coord", states) }}',
        self.hass).render()
    self.assertEqual('None', rendered)
def test_closest_function_no_location_states(self):
    """Test closest function without location states."""
    # No entity carries a location at all, so there is nothing to pick.
    rendered = template.Template(
        '{{ closest(states) }}', self.hass).render()
    self.assertEqual('None', rendered)
def test_extract_entities_none_exclude_stuff(self):
    """Test extract entities function with none or exclude stuff."""
    # No template at all matches everything.
    self.assertEqual(MATCH_ALL, template.extract_entities(None))
    # Templates whose entity references cannot be statically resolved
    # also fall back to MATCH_ALL.
    for tpl_str in (
            '{{ closest(states.zone.far_away, '
            'states.test_domain).entity_id }}',
            '{{ distance("123", states.test_object_2) }}'):
        self.assertEqual(MATCH_ALL, template.extract_entities(tpl_str))
def test_extract_entities_no_match_entities(self):
    """Test extract entities function with none entities stuff."""
    # Deliberately malformed filter call ('%Y' True -- missing comma):
    # extraction must still fall back to MATCH_ALL rather than raise.
    self.assertEqual(
        MATCH_ALL,
        template.extract_entities(
            "{{ value_json.tst | timestamp_custom('%Y' True) }}"))
    # A domain-wide loop does not name individual entities, so the
    # match set cannot be narrowed.
    self.assertEqual(
        MATCH_ALL,
        template.extract_entities("""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},
{% endfor %}
"""))
def test_extract_entities_match_entities(self):
    """Test extract entities function with entities stuff."""
    # Single-entity templates: extraction yields exactly that entity id.
    self.assertListEqual(
        ['device_tracker.phone_1'],
        template.extract_entities("""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% else %}
Hercules is at {{ states('device_tracker.phone_1') }}.
{% endif %}
"""))
    self.assertListEqual(
        ['binary_sensor.garage_door'],
        template.extract_entities("""
{{ as_timestamp(states.binary_sensor.garage_door.last_changed) }}
"""))
    self.assertListEqual(
        ['binary_sensor.garage_door'],
        template.extract_entities("""
{{ states("binary_sensor.garage_door") }}
"""))
    self.assertListEqual(
        ['device_tracker.phone_2'],
        template.extract_entities("""
is_state_attr('device_tracker.phone_2', 'battery', 40)
"""))
    # Multiple referenced entities are all reported; order is not
    # guaranteed, hence sorted() on both sides.
    self.assertListEqual(
        sorted([
            'device_tracker.phone_1',
            'device_tracker.phone_2',
        ]),
        sorted(template.extract_entities("""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% elif states.device_tracker.phone_2.attributes.battery < 40 %}
Hercules you power goes done!.
{% endif %}
""")))
    # Non-ASCII characters around the references must not break
    # extraction.
    self.assertListEqual(
        sorted([
            'sensor.pick_humidity',
            'sensor.pick_temperature',
        ]),
        sorted(template.extract_entities("""
{{
states.sensor.pick_temperature.state ~ „°C (“ ~
states.sensor.pick_humidity.state ~ „ %“
}}
""")))
    self.assertListEqual(
        sorted([
            'sensor.luftfeuchtigkeit_mean',
            'input_slider.luftfeuchtigkeit',
        ]),
        sorted(template.extract_entities(
            "{% if (states('sensor.luftfeuchtigkeit_mean') | int)"
            " > (states('input_slider.luftfeuchtigkeit') | int +1.5)"
            " %}true{% endif %}"
        )))
| alexmogavero/home-assistant | tests/helpers/test_template.py | Python | apache-2.0 | 27,977 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.openstack.common import log
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mech_agent
LOG = log.getLogger(__name__)
class OpenvswitchMechanismDriver(mech_agent.AgentMechanismDriverBase):
    """Attach to networks using openvswitch L2 agent.

    The OpenvswitchMechanismDriver integrates the ml2 plugin with the
    openvswitch L2 agent. Port binding with this driver requires the
    openvswitch agent to be running on the port's host, and that agent
    to have connectivity to at least one segment of the port's
    network.
    """

    def __init__(self):
        super(OpenvswitchMechanismDriver, self).__init__(
            constants.AGENT_TYPE_OVS,
            portbindings.VIF_TYPE_OVS,
            True)

    def check_segment_for_agent(self, segment, agent):
        """Return whether the given agent can connect to the segment."""
        agent_conf = agent['configurations']
        mappings = agent_conf.get('bridge_mappings', {})
        tunnel_types = agent_conf.get('tunnel_types', [])
        LOG.debug(_("Checking segment: %(segment)s "
                    "for mappings: %(mappings)s "
                    "with tunnel_types: %(tunnel_types)s"),
                  {'segment': segment, 'mappings': mappings,
                   'tunnel_types': tunnel_types})
        network_type = segment[api.NETWORK_TYPE]
        # 'local' networks and any tunnel type the agent supports are
        # always reachable; flat/vlan require a bridge mapping for the
        # segment's physical network. Anything else is unsupported.
        if network_type == 'local' or network_type in tunnel_types:
            return True
        if network_type in ('flat', 'vlan'):
            return segment[api.PHYSICAL_NETWORK] in mappings
        return False
| ntt-sic/neutron | neutron/plugins/ml2/drivers/mech_openvswitch.py | Python | apache-2.0 | 2,288 |
import os
import unittest
from vsg.rules import attribute_specification
from vsg import vhdlFile
from vsg.tests import utils
# Directory holding the VHDL fixture files used by these tests.
sTestDir = os.path.dirname(__file__)

# Parse the shared input file once at import time; eError is checked in
# setUp so every test fails loudly if parsing broke.
lFile, eError = vhdlFile.utils.read_vhdlfile(
    os.path.join(sTestDir, 'rule_500_test_input.vhd'))

# Expected file contents after fixing; each list is seeded with an empty
# first element, presumably so indexes line up with 1-based line numbers
# (matches how read_file appends lines) -- TODO confirm.
lExpected_lower = []
lExpected_lower.append('')
utils.read_file(
    os.path.join(sTestDir, 'rule_500_test_input.fixed_lower.vhd'),
    lExpected_lower)

lExpected_upper = []
lExpected_upper.append('')
utils.read_file(
    os.path.join(sTestDir, 'rule_500_test_input.fixed_upper.vhd'),
    lExpected_upper)
class test_attribute_specification_statement_rule(unittest.TestCase):
    """Exercises attribute_specification rule 500 (analyze and fix)."""

    def setUp(self):
        self.oFile = vhdlFile.vhdlFile(lFile)
        self.assertIsNone(eError)

    def _new_rule(self, case=None):
        """Build a rule_500 instance, optionally overriding its case."""
        oRule = attribute_specification.rule_500()
        if case is not None:
            oRule.case = case
        return oRule

    def _assert_metadata(self, oRule):
        """The rule must identify itself as attribute_specification/500."""
        self.assertTrue(oRule)
        self.assertEqual(oRule.name, 'attribute_specification')
        self.assertEqual(oRule.identifier, '500')

    def test_rule_500_lower(self):
        oRule = self._new_rule()
        self._assert_metadata(oRule)
        oRule.analyze(self.oFile)
        self.assertEqual(
            utils.extract_violation_lines_from_violation_object(
                oRule.violations),
            [6])

    def test_rule_500_upper(self):
        oRule = self._new_rule(case='upper')
        self._assert_metadata(oRule)
        oRule.analyze(self.oFile)
        self.assertEqual(
            utils.extract_violation_lines_from_violation_object(
                oRule.violations),
            [4])

    def test_fix_rule_500_lower(self):
        oRule = self._new_rule()
        oRule.fix(self.oFile)
        self.assertEqual(lExpected_lower, self.oFile.get_lines())
        # After fixing, a re-analysis must report no violations.
        oRule.analyze(self.oFile)
        self.assertEqual(oRule.violations, [])

    def test_fix_rule_500_upper(self):
        oRule = self._new_rule(case='upper')
        oRule.fix(self.oFile)
        self.assertEqual(lExpected_upper, self.oFile.get_lines())
        oRule.analyze(self.oFile)
        self.assertEqual(oRule.violations, [])
| jeremiah-c-leary/vhdl-style-guide | vsg/tests/attribute_specification/test_rule_500.py | Python | gpl-3.0 | 2,176 |
import logging
import os
import select
import SimpleHTTPServer
import socket
import SocketServer
import threading
# Directory containing this module; ThisDirHandler serves files from here.
HERE = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
class ThisDirHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Request handler serving files relative to this module's directory."""

    def translate_path(self, path):
        # Drop any query string or fragment before mapping to a file.
        for separator in ("?", "#"):
            path = path.split(separator, 1)[0]
        components = [part for part in path.split("/") if part]
        return os.path.join(HERE, *components)

    def log_message(self, s, *args):
        # output via logging so nose can catch it
        logger.info(s, *args)
class ShutdownServer(SocketServer.TCPServer):
    """Mixin that allows serve_forever to be shut down.

    The methods in this mixin are backported from SocketServer.py in the Python
    2.6.4 standard library. The mixin is unnecessary in 2.6 and later, when
    BaseServer supports the shutdown method directly.
    """

    def __init__(self, use_tls, *args, **kwargs):
        # Remember whether to wrap the listening socket in TLS; the
        # actual wrapping happens in server_bind() below.
        self.__use_tls = use_tls
        SocketServer.TCPServer.__init__(self, *args, **kwargs)
        self.__is_shut_down = threading.Event()
        self.__serving = False

    def server_bind(self):
        SocketServer.TCPServer.server_bind(self)
        if self.__use_tls:
            import ssl
            # server.key/server.pem live next to this module; the final
            # True is ssl.wrap_socket's server_side argument.
            self.socket = ssl.wrap_socket(
                self.socket,
                os.path.join(os.path.dirname(__file__), "server.key"),
                os.path.join(os.path.dirname(__file__), "server.pem"),
                True,
            )

    def serve_forever(self, poll_interval=0.1):
        """Handle one request at a time until shutdown.

        Polls for shutdown every poll_interval seconds. Ignores
        self.timeout. If you need to do periodic tasks, do them in
        another thread.
        """
        self.__serving = True
        self.__is_shut_down.clear()
        while self.__serving:
            # Wait at most poll_interval for the listening socket to
            # become readable, then accept and handle one request.
            r, w, e = select.select([self.socket], [], [], poll_interval)
            if r:
                self._handle_request_noblock()
        # Signal shutdown() that the loop has fully exited.
        self.__is_shut_down.set()

    def shutdown(self):
        """Stops the serve_forever loop.

        Blocks until the loop has finished. This must be called while
        serve_forever() is running in another thread, or it will deadlock.
        """
        self.__serving = False
        self.__is_shut_down.wait()

    def handle_request(self):
        """Handle one request, possibly blocking.

        Respects self.timeout.
        """
        # Support people who used socket.settimeout() to escape
        # handle_request before self.timeout was available.
        timeout = self.socket.gettimeout()
        if timeout is None:
            timeout = self.timeout
        elif self.timeout is not None:
            timeout = min(timeout, self.timeout)
        fd_sets = select.select([self], [], [], timeout)
        if not fd_sets[0]:
            self.handle_timeout()
            return
        self._handle_request_noblock()

    def _handle_request_noblock(self):
        """Handle one request, without blocking.

        I assume that select.select has returned that the socket is
        readable before this function was called, so there should be
        no risk of blocking in get_request().
        """
        try:
            request, client_address = self.get_request()
        except socket.error:
            return
        if self.verify_request(request, client_address):
            try:
                self.process_request(request, client_address)
            except:
                self.handle_error(request, client_address)
                self.close_request(request)
def start_server(handler, use_tls=False):
    """Serve `handler` on an OS-assigned port in a background thread.

    Returns a (server, port) tuple. The thread is not a daemon, so the
    caller is expected to call server.shutdown() when done.
    """
    server = ShutdownServer(use_tls, ("", 0), handler)
    threading.Thread(target=server.serve_forever).start()
    port = server.socket.getsockname()[1]
    return server, port
| catapult-project/catapult | third_party/httplib2/python2/httplib2/test/miniserver.py | Python | bsd-3-clause | 3,785 |
#!/usr/bin/python
# Copyright 2015 Huawei Devices USA Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors:
# Chuk Orakwue <chuk.orakwue@huawei.com>
import re
from ftrace.common import ParserError
from ftrace.globals import SCHED_RAVG_WINDOW
from .register import register_parser
from collections import namedtuple
#from ftrace.third_party.cnamedtuple import namedtuple
# Name of the ftrace tracepoint this module parses; also the module's
# only public symbol.
TRACEPOINT = 'sched_task_load'

__all__ = [TRACEPOINT]
# sched_task_load: 563 (EventThread): sum=986, sum_scaled=245, period=47165 demand=111446 small=1 boost=0 reason=0 sync=0 prefer_idle=0\n\
# Base namedtuple for one parsed sched_task_load event; field names
# mirror the key=value pairs in the raw payload (see the sample line
# in the comment above).
SchedTaskLoadBase = namedtuple(TRACEPOINT,
    [
        'pid',
        'comm',
        'sum',
        'sum_scaled',
        'period',
        'demand',
        'small',
        'boost',
        'reason',
        'sync',
        'prefer_idle',
    ]
)
class SchedTaskLoad(SchedTaskLoadBase):
    """Parsed `sched_task_load` event with numeric fields coerced to int."""
    __slots__ = ()

    def __new__(cls, pid, comm, _sum, sum_scaled,
                period, demand, small, boost, reason,
                sync, prefer_idle):
        pid = int(pid)
        _sum = int(_sum)
        sum_scaled = int(sum_scaled)
        period = int(period)
        demand = int(demand)
        small = int(small)
        boost = int(boost)
        reason = int(reason)
        # NOTE(review): `sync` and `prefer_idle` are left as the raw digit
        # strings captured by the regex, unlike every other numeric field.
        # Kept as-is to preserve behaviour; confirm whether int() coercion
        # was intended.
        #
        # Fix: the original called super(cls, SchedTaskLoad), which has
        # the arguments reversed -- super(type, obj_or_type) requires the
        # second argument to be an instance/subclass of the first, so any
        # subclass of SchedTaskLoad would raise TypeError here. The
        # conventional super(SchedTaskLoad, cls) is behaviourally
        # identical for SchedTaskLoad itself and correct for subclasses.
        return super(SchedTaskLoad, cls).__new__(
            cls,
            pid=pid,
            comm=comm,
            sum=_sum,
            sum_scaled=sum_scaled,
            period=period,
            demand=demand,
            small=small,
            boost=boost,
            reason=reason,
            sync=sync,
            prefer_idle=prefer_idle,
        )

    @property
    def wb_load(self):
        """Returns Task Load as seen by Qualcomm's Window Based Task Demand"""
        # Demand is normalised against the scheduler's ravg window length.
        return self.demand / float(SCHED_RAVG_WINDOW)
sched_task_load_pattern = re.compile(
r"""(?P<pid>\d+)\s+
\((?P<comm>.*)\):\s+
sum=(?P<_sum>\d+),\s+
sum_scaled=(?P<sum_scaled>\d+),\s+
period=(?P<period>\d+)\s+
demand=(?P<demand>\d+)\s+
small=(?P<small>\d+)\s+
boost=(?P<boost>\d+)\s+
reason=(?P<reason>\d+)\s+
sync=(?P<sync>\d+)\s+
prefer_idle=(?P<prefer_idle>\d+)
""",
re.X|re.M
)
@register_parser
def sched_task_load(payload):
    """Parser for `sched_task_load` tracepoint"""
    # Returns a SchedTaskLoad namedtuple, or None (implicitly) when the
    # payload does not match the expected format.
    try:
        match = re.match(sched_task_load_pattern, payload)
        if match:
            match_group_dict = match.groupdict()
            return SchedTaskLoad(**match_group_dict)
    # Python 2 except syntax; e.message is deprecated but retained here
    # to match the rest of this (Python 2) code base.
    except Exception, e:
        raise ParserError(e.message)
| corakwue/ftrace | ftrace/parsers/sched_task_load.py | Python | apache-2.0 | 3,158 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography.x509.base import (
Certificate, CertificateBuilder, CertificateRevocationList,
CertificateRevocationListBuilder,
CertificateSigningRequest, CertificateSigningRequestBuilder,
InvalidVersion, RevokedCertificate, RevokedCertificateBuilder,
Version, load_der_x509_certificate, load_der_x509_crl, load_der_x509_csr,
load_pem_x509_certificate, load_pem_x509_crl, load_pem_x509_csr,
random_serial_number,
)
from cryptography.x509.extensions import (
AccessDescription, AuthorityInformationAccess,
AuthorityKeyIdentifier, BasicConstraints, CRLDistributionPoints,
CRLNumber, CRLReason, CertificateIssuer, CertificatePolicies,
DistributionPoint, DuplicateExtension, ExtendedKeyUsage, Extension,
ExtensionNotFound, ExtensionType, Extensions, GeneralNames,
InhibitAnyPolicy, InvalidityDate, IssuerAlternativeName, KeyUsage,
NameConstraints, NoticeReference, OCSPNoCheck, PolicyConstraints,
PolicyInformation, ReasonFlags, SubjectAlternativeName,
SubjectKeyIdentifier, UnrecognizedExtension, UnsupportedExtension,
UserNotice
)
from cryptography.x509.general_name import (
DNSName, DirectoryName, GeneralName, IPAddress, OtherName, RFC822Name,
RegisteredID, UniformResourceIdentifier, UnsupportedGeneralNameType,
_GENERAL_NAMES
)
from cryptography.x509.name import (
Name, NameAttribute, RelativeDistinguishedName
)
from cryptography.x509.oid import (
AuthorityInformationAccessOID, CRLEntryExtensionOID,
CertificatePoliciesOID, ExtendedKeyUsageOID, ExtensionOID, NameOID,
ObjectIdentifier, SignatureAlgorithmOID, _SIG_OIDS_TO_HASH
)
# Module-level aliases of the grouped OID constants defined in
# cryptography.x509.oid, kept (presumably) for backwards compatibility
# with code that imported them directly from cryptography.x509.

# Certificate extension OIDs.
OID_AUTHORITY_INFORMATION_ACCESS = ExtensionOID.AUTHORITY_INFORMATION_ACCESS
OID_AUTHORITY_KEY_IDENTIFIER = ExtensionOID.AUTHORITY_KEY_IDENTIFIER
OID_BASIC_CONSTRAINTS = ExtensionOID.BASIC_CONSTRAINTS
OID_CERTIFICATE_POLICIES = ExtensionOID.CERTIFICATE_POLICIES
OID_CRL_DISTRIBUTION_POINTS = ExtensionOID.CRL_DISTRIBUTION_POINTS
OID_EXTENDED_KEY_USAGE = ExtensionOID.EXTENDED_KEY_USAGE
OID_FRESHEST_CRL = ExtensionOID.FRESHEST_CRL
OID_INHIBIT_ANY_POLICY = ExtensionOID.INHIBIT_ANY_POLICY
OID_ISSUER_ALTERNATIVE_NAME = ExtensionOID.ISSUER_ALTERNATIVE_NAME
OID_KEY_USAGE = ExtensionOID.KEY_USAGE
OID_NAME_CONSTRAINTS = ExtensionOID.NAME_CONSTRAINTS
OID_OCSP_NO_CHECK = ExtensionOID.OCSP_NO_CHECK
OID_POLICY_CONSTRAINTS = ExtensionOID.POLICY_CONSTRAINTS
OID_POLICY_MAPPINGS = ExtensionOID.POLICY_MAPPINGS
OID_SUBJECT_ALTERNATIVE_NAME = ExtensionOID.SUBJECT_ALTERNATIVE_NAME
OID_SUBJECT_DIRECTORY_ATTRIBUTES = ExtensionOID.SUBJECT_DIRECTORY_ATTRIBUTES
OID_SUBJECT_INFORMATION_ACCESS = ExtensionOID.SUBJECT_INFORMATION_ACCESS
OID_SUBJECT_KEY_IDENTIFIER = ExtensionOID.SUBJECT_KEY_IDENTIFIER

# Signature algorithm OIDs.
OID_DSA_WITH_SHA1 = SignatureAlgorithmOID.DSA_WITH_SHA1
OID_DSA_WITH_SHA224 = SignatureAlgorithmOID.DSA_WITH_SHA224
OID_DSA_WITH_SHA256 = SignatureAlgorithmOID.DSA_WITH_SHA256
OID_ECDSA_WITH_SHA1 = SignatureAlgorithmOID.ECDSA_WITH_SHA1
OID_ECDSA_WITH_SHA224 = SignatureAlgorithmOID.ECDSA_WITH_SHA224
OID_ECDSA_WITH_SHA256 = SignatureAlgorithmOID.ECDSA_WITH_SHA256
OID_ECDSA_WITH_SHA384 = SignatureAlgorithmOID.ECDSA_WITH_SHA384
OID_ECDSA_WITH_SHA512 = SignatureAlgorithmOID.ECDSA_WITH_SHA512
OID_RSA_WITH_MD5 = SignatureAlgorithmOID.RSA_WITH_MD5
OID_RSA_WITH_SHA1 = SignatureAlgorithmOID.RSA_WITH_SHA1
OID_RSA_WITH_SHA224 = SignatureAlgorithmOID.RSA_WITH_SHA224
OID_RSA_WITH_SHA256 = SignatureAlgorithmOID.RSA_WITH_SHA256
OID_RSA_WITH_SHA384 = SignatureAlgorithmOID.RSA_WITH_SHA384
OID_RSA_WITH_SHA512 = SignatureAlgorithmOID.RSA_WITH_SHA512

# X.509 name attribute OIDs.
OID_COMMON_NAME = NameOID.COMMON_NAME
OID_COUNTRY_NAME = NameOID.COUNTRY_NAME
OID_DOMAIN_COMPONENT = NameOID.DOMAIN_COMPONENT
OID_DN_QUALIFIER = NameOID.DN_QUALIFIER
OID_EMAIL_ADDRESS = NameOID.EMAIL_ADDRESS
OID_GENERATION_QUALIFIER = NameOID.GENERATION_QUALIFIER
OID_GIVEN_NAME = NameOID.GIVEN_NAME
OID_LOCALITY_NAME = NameOID.LOCALITY_NAME
OID_ORGANIZATIONAL_UNIT_NAME = NameOID.ORGANIZATIONAL_UNIT_NAME
OID_ORGANIZATION_NAME = NameOID.ORGANIZATION_NAME
OID_PSEUDONYM = NameOID.PSEUDONYM
OID_SERIAL_NUMBER = NameOID.SERIAL_NUMBER
OID_STATE_OR_PROVINCE_NAME = NameOID.STATE_OR_PROVINCE_NAME
OID_SURNAME = NameOID.SURNAME
OID_TITLE = NameOID.TITLE

# Extended key usage OIDs.
OID_CLIENT_AUTH = ExtendedKeyUsageOID.CLIENT_AUTH
OID_CODE_SIGNING = ExtendedKeyUsageOID.CODE_SIGNING
OID_EMAIL_PROTECTION = ExtendedKeyUsageOID.EMAIL_PROTECTION
OID_OCSP_SIGNING = ExtendedKeyUsageOID.OCSP_SIGNING
OID_SERVER_AUTH = ExtendedKeyUsageOID.SERVER_AUTH
OID_TIME_STAMPING = ExtendedKeyUsageOID.TIME_STAMPING

# Certificate policies OIDs.
OID_ANY_POLICY = CertificatePoliciesOID.ANY_POLICY
OID_CPS_QUALIFIER = CertificatePoliciesOID.CPS_QUALIFIER
OID_CPS_USER_NOTICE = CertificatePoliciesOID.CPS_USER_NOTICE

# CRL entry extension OIDs.
OID_CERTIFICATE_ISSUER = CRLEntryExtensionOID.CERTIFICATE_ISSUER
OID_CRL_REASON = CRLEntryExtensionOID.CRL_REASON
OID_INVALIDITY_DATE = CRLEntryExtensionOID.INVALIDITY_DATE

# Authority information access OIDs.
OID_CA_ISSUERS = AuthorityInformationAccessOID.CA_ISSUERS
OID_OCSP = AuthorityInformationAccessOID.OCSP

# Public API of the cryptography.x509 package.
__all__ = [
    "load_pem_x509_certificate",
    "load_der_x509_certificate",
    "load_pem_x509_csr",
    "load_der_x509_csr",
    "load_pem_x509_crl",
    "load_der_x509_crl",
    "random_serial_number",
    "InvalidVersion",
    "DuplicateExtension",
    "UnsupportedExtension",
    "ExtensionNotFound",
    "UnsupportedGeneralNameType",
    "NameAttribute",
    "Name",
    "RelativeDistinguishedName",
    "ObjectIdentifier",
    "ExtensionType",
    "Extensions",
    "Extension",
    "ExtendedKeyUsage",
    "OCSPNoCheck",
    "BasicConstraints",
    "CRLNumber",
    "KeyUsage",
    "AuthorityInformationAccess",
    "AccessDescription",
    "CertificatePolicies",
    "PolicyInformation",
    "UserNotice",
    "NoticeReference",
    "SubjectKeyIdentifier",
    "NameConstraints",
    "CRLDistributionPoints",
    "DistributionPoint",
    "ReasonFlags",
    "InhibitAnyPolicy",
    "SubjectAlternativeName",
    "IssuerAlternativeName",
    "AuthorityKeyIdentifier",
    "GeneralNames",
    "GeneralName",
    "RFC822Name",
    "DNSName",
    "UniformResourceIdentifier",
    "RegisteredID",
    "DirectoryName",
    "IPAddress",
    "OtherName",
    "Certificate",
    "CertificateRevocationList",
    "CertificateRevocationListBuilder",
    "CertificateSigningRequest",
    "RevokedCertificate",
    "RevokedCertificateBuilder",
    "CertificateSigningRequestBuilder",
    "CertificateBuilder",
    "Version",
    "_SIG_OIDS_TO_HASH",
    "OID_CA_ISSUERS",
    "OID_OCSP",
    "_GENERAL_NAMES",
    "CertificateIssuer",
    "CRLReason",
    "InvalidityDate",
    "UnrecognizedExtension",
    "PolicyConstraints",
]
| hipnusleo/laserjet | resource/pypi/cryptography-1.7.1/src/cryptography/x509/__init__.py | Python | apache-2.0 | 7,036 |
"""
Parser for vox 2006-2007 images
reads ImageSet directory and gets classes of images
than writes it to arff file
"""
import os
# Number of images in the data set (+1 for the unused index 0).
SIZE = 5305

maxint = 0  # highest image id seen across all ImageSet files
res = {}
for subdir, dirs, files in os.walk('.'):
    for file in files:
        if file[-3:] != "txt":
            continue
        # ImageSet files are named "<class>_<set>.txt".
        cls, dummy = file.split("_")
        # Use a context manager so each file is closed promptly; the
        # original opened every file and never closed it.
        with open(file, 'r') as f:
            lines = f.readlines()
        if cls not in res:
            # NOTE(review): the placeholder value for image i is i
            # itself, so ids absent from the ImageSet file print their
            # own index -- confirm whether 0 was intended instead.
            res[cls] = list(range(0, SIZE))
        for line in lines:
            # split() with no argument splits on any whitespace run,
            # which handles the variable-width id/value separator (VOC
            # pads positive labels with an extra space) more robustly
            # than the original replace-then-split.
            id, val = line.strip().split()
            res[cls][int(id)] = val
            if maxint < int(id):
                maxint = int(id)

# Emit the ARFF header followed by one row per image id.
print("@relation classification\n")
print("@attribute image string")
KEYS = list(res.keys())
for key in KEYS:
    print("@attribute %s numeric" % (key))
print("\n@data")
for i in range(1, SIZE):
    row = ["'%06d'" % (i,)]
    row.extend(str(res[cls][i]) for cls in KEYS)
    print(",".join(row))
| open-machine-learning/mldata-utils | scripts/voc2006classes.py | Python | gpl-3.0 | 954 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from rbnics.utils.io import NumpyIO
def function_save(fun, directory, filename, suffix=None):
    """Save the vector of *fun* to *directory*/*filename* via NumpyIO.

    When *suffix* is given it is appended to the filename after a dot.
    """
    target = filename if suffix is None else filename + "." + str(suffix)
    NumpyIO.save_file(fun.vector(), directory, target)
| mathLab/RBniCS | rbnics/backends/online/numpy/wrapping/function_save.py | Python | lgpl-3.0 | 357 |
# -*- coding: utf-8 -*-
# =============================================================================
# Tasks to be callable async
# =============================================================================
# Registry of task callables: every task function defined below adds
# itself to this dict under its public name so it can be run async.
tasks = {}
# -----------------------------------------------------------------------------
def gis_download_kml(record_id, filename, user_id=None):
    """
    Download a KML file
    - will normally be done Asynchronously if there is a worker alive

    @param record_id: id of the record in db.gis_layer_kml
    @param filename: name to save the file as
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        # Authenticate as the requesting user (worker runs unauthenticated)
        auth.s3_impersonate(user_id)
    # Run the Task & return the result
    return gis.download_kml(record_id, filename)

# Expose to the scheduler under its public task name.
tasks["gis_download_kml"] = gis_download_kml
# -----------------------------------------------------------------------------
def gis_update_location_tree(feature, user_id=None):
    """
    Update the Location Tree for a feature
    - will normally be done Asynchronously if there is a worker alive

    @param feature: the feature (in JSON format)
    @param user_id: calling request's auth.user.id or None
    """
    if user_id:
        auth.s3_impersonate(user_id)
    # Decode the JSON payload and hand it straight to the GIS module.
    return gis.update_location_tree(json.loads(feature))

tasks["gis_update_location_tree"] = gis_update_location_tree
# -----------------------------------------------------------------------------
def sync_synchronize(repository_id, user_id=None, manual=False):
    """
    Run all tasks for a repository, to be called from scheduler
    """
    auth.s3_impersonate(user_id)
    rtable = s3db.sync_repository
    query = (rtable.deleted != True) & \
            (rtable.id == repository_id)
    repository = db(query).select(limitby=(0, 1)).first()
    if repository:
        sync = s3base.S3Sync()
        status = sync.get_status()
        if status.running:
            # Another run is already in progress: log the fact and bail
            # out instead of starting a concurrent synchronization.
            message = "Synchronization already active - skipping run"
            sync.log.write(repository_id=repository.id,
                           resource_name=None,
                           transmission=None,
                           mode=None,
                           action="check",
                           remote=False,
                           result=sync.log.ERROR,
                           message=message)
            db.commit()
            return sync.log.ERROR
        # Mark the run as active; the finally block guarantees the flag
        # is cleared even if synchronize() raises.
        sync.set_status(running=True, manual=manual)
        try:
            sync.synchronize(repository)
        finally:
            sync.set_status(running=False, manual=False)
    # NOTE(review): SUCCESS is returned even when no repository matched
    # the given id -- confirm that is intended.
    db.commit()
    return s3base.S3SyncLog.SUCCESS

tasks["sync_synchronize"] = sync_synchronize
# -----------------------------------------------------------------------------
def maintenance(period="daily"):
    """
    Run all maintenance tasks which should be done daily
    - these are read from the template
    """
    # Import the configured template's maintenance module via exec; fall
    # back to the default template when the custom one does not exist.
    mod = "applications.%s.private.templates.%s.maintenance as maintenance" % \
          (appname, settings.get_template())
    try:
        exec("import %s" % mod)
    # Python 2 except syntax; `e` is unused but kept for compatibility.
    except ImportError, e:
        # No Custom Maintenance available, use the default
        exec("import applications.%s.private.templates.default.maintenance as maintenance" % appname)
    if period == "daily":
        result = maintenance.Daily()()
    else:
        # Only daily maintenance is implemented so far.
        result = "NotImplementedError"
    return result

tasks["maintenance"] = maintenance
# -----------------------------------------------------------------------------
# Messaging tasks are only registered when the msg module is enabled.
if settings.has_module("msg"):

    # -------------------------------------------------------------------------
    def msg_process_outbox(contact_method, user_id=None):
        """
        Process Outbox
        - will normally be done Asynchronously if there is a worker alive

        @param contact_method: one from s3msg.MSG_CONTACT_OPTS
        @param user_id: calling request's auth.user.id or None
        """
        if user_id:
            # Authenticate
            auth.s3_impersonate(user_id)
        # Run the Task & return the result
        return msg.process_outbox(contact_method)

    tasks["msg_process_outbox"] = msg_process_outbox

    # -------------------------------------------------------------------------
    def msg_process_inbound_email(username, user_id):
        """
        Poll an inbound email source.

        @param username: email address of the email source to read from.
        This uniquely identifies one inbound email task.
        """
        # NOTE(review): user_id is accepted but never used here, unlike
        # msg_process_outbox -- confirm whether impersonation was intended.
        # Run the Task & return the result
        return msg.fetch_inbound_email(username)

    tasks["msg_process_inbound_email"] = msg_process_inbound_email

    # -------------------------------------------------------------------------
    def msg_twilio_inbound_sms(account, user_id):
        """
        Poll an inbound SMS(Twilio) source.

        @param account: account name for the SMS source to read from.
        This uniquely identifies one inbound SMS task.
        """
        # Run the Task & return the result
        return msg.twilio_inbound_sms(account)

    tasks["msg_twilio_inbound_sms"] = msg_twilio_inbound_sms

    # -------------------------------------------------------------------------
    def msg_parse_workflow(workflow, source, user_id):
        """
        Processes the msg_log for unparsed messages.
        """
        # Run the Task & return the result
        return msg.parse_import(workflow, source)

    tasks["msg_parse_workflow"] = msg_parse_workflow

    # -------------------------------------------------------------------------
    def msg_search_subscription_notifications(frequency):
        """
        Search Subscriptions & send Notifications.
        """
        # Run the Task & return the result
        return s3db.msg_search_subscription_notifications(frequency=frequency)

    tasks["msg_search_subscription_notifications"] = msg_search_subscription_notifications
# -----------------------------------------------------------------------------
if settings.has_module("stats"):

    def stats_group_clean(user_id=None):
        """
            Update the stats_aggregate table by calculating all the stats_group
            records which have the dirty flag set to True

            @param user_id: calling request's auth.user.id or None
        """
        if user_id:
            # Authenticate
            auth.s3_impersonate(user_id)
        # Run the Task & return the result
        return s3db.stats_group_clean()

    tasks["stats_group_clean"] = stats_group_clean

    def stats_update_time_aggregate(data_id=None, user_id=None):
        """
            Update the stats_aggregate table for the given stats_data record

            @param data_id: the id of the stats_data record just added
            @param user_id: calling request's auth.user.id or None
        """
        if user_id:
            # Authenticate
            auth.s3_impersonate(user_id)
        # Run the Task & return the result
        return s3db.stats_update_time_aggregate(data_id)

    tasks["stats_update_time_aggregate"] = stats_update_time_aggregate

    def stats_update_aggregate_location(location_level,
                                        root_location_id,
                                        parameter_id,
                                        start_date,
                                        end_date,
                                        user_id=None):
        """
            Update the stats_aggregate table for the given location and parameter

            @param location_level: the gis level at which the data needs to be accumulated
            @param root_location_id: the id of the location
            @param parameter_id: the parameter for which the stats are being updated
            @param start_date: the start date of the period in question
            @param end_date: the end date of the period in question
            @param user_id: calling request's auth.user.id or None
        """
        if user_id:
            # Authenticate
            auth.s3_impersonate(user_id)
        # Run the Task & return the result
        return s3db.stats_update_aggregate_location(location_level,
                                                    root_location_id,
                                                    parameter_id,
                                                    start_date,
                                                    end_date,
                                                    )

    tasks["stats_update_aggregate_location"] = stats_update_aggregate_location
# -----------------------------------------------------------------------------
# Instantiate Scheduler instance with the list of tasks
# (s3.tasks must be populated before S3Task() is constructed)
s3.tasks = tasks
s3task = s3base.S3Task()
# Expose globally so controllers can schedule tasks via current.s3task
current.s3task = s3task

# -----------------------------------------------------------------------------
# Reusable field for scheduler task links
scheduler_task_id = S3ReusableField("scheduler_task_id",
                                    "reference %s" % s3base.S3Task.TASK_TABLENAME,
                                    ondelete="CASCADE")
s3.scheduler_task_id = scheduler_task_id
# END =========================================================================
| madhurauti/Map-Polygon | models/tasks.py | Python | mit | 9,381 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list all project IDs associated with the active user."""
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.projects import util as command_lib_util
class List(base.ListCommand):
  """List projects accessible by the active account.

  Lists all active projects, where the active account has Owner, Editor or
  Viewer permissions. Projects are listed in alphabetical order by project name.
  Projects that have been deleted or are pending deletion are not included.

  You can specify the maximum number of projects to list using the `--limit`
  flag.

  ## EXAMPLES

  The following command lists a maximum of five projects sorted alphabetically
  by name:

    $ {command} --limit=5
  """
  # NOTE: the class docstring doubles as the user-facing `gcloud` help text
  # ({command} is substituted by calliope), so its wording must stay stable.

  def Collection(self):
    """Returns the resource collection path (drives default output format)."""
    return command_lib_util.PROJECTS_COLLECTION

  def GetUriFunc(self):
    """Returns a function mapping a project resource to its URI."""
    return command_lib_util.ProjectsUriFunc

  def Run(self, args):
    """Run the list command."""
    # TODO(user): b/27946801 handle --limit,--page-size,--filter
    return projects_api.List()
| KaranToor/MA450 | google-cloud-sdk/lib/surface/projects/list.py | Python | apache-2.0 | 1,688 |
import unittest
import numpy as np
from control.ctrlutil import *
class TestUtils(unittest.TestCase):
    """Unit tests for the ctrlutil helpers: unwrap, db2mag and mag2db."""

    def setUp(self):
        # Paired magnitude/decibel reference values used by the
        # conversion tests below (db = 20*log10(mag)).
        self.mag = np.array([1, 10, 100, 2, 0.1, 0.01])
        self.db = np.array([0, 20, 40, 6.0205999, -20, -40])

    def check_unwrap_array(self, angle, period=None):
        # Fold the angle into a single period, unwrap it again, and check
        # that the original (continuous) array is recovered.
        if period is None:
            folded = angle % (2 * np.pi)
            recovered = unwrap(folded)
        else:
            folded = angle % period
            recovered = unwrap(folded, period)
        np.testing.assert_array_almost_equal(recovered, angle)

    def test_unwrap_increasing(self):
        self.check_unwrap_array(np.linspace(0, 20, 50))

    def test_unwrap_decreasing(self):
        self.check_unwrap_array(np.linspace(0, -20, 50))

    def test_unwrap_inc_degrees(self):
        self.check_unwrap_array(np.linspace(0, 720, 50), 360)

    def test_unwrap_dec_degrees(self):
        self.check_unwrap_array(np.linspace(0, -720, 50), 360)

    def test_unwrap_large_skips(self):
        # Jumps of more than one period must still collapse to zero.
        angle = np.array([0., 4 * np.pi, -2 * np.pi])
        np.testing.assert_array_almost_equal(unwrap(angle), [0., 0., 0.])

    def test_unwrap_list(self):
        # Plain Python lists are accepted as well as ndarrays.
        np.testing.assert_array_almost_equal(unwrap([0, 2.2, 5.4, -0.4], 1.0),
                                             [0, 0.2, 0.4, 0.6])

    def test_db2mag(self):
        for mag, db in zip(self.mag, self.db):
            np.testing.assert_almost_equal(mag, db2mag(db))

    def test_db2mag_array(self):
        np.testing.assert_array_almost_equal(db2mag(self.db), self.mag)

    def test_mag2db(self):
        for db, mag in zip(self.db, self.mag):
            np.testing.assert_almost_equal(db, mag2db(mag))

    def test_mag2db_array(self):
        np.testing.assert_array_almost_equal(mag2db(self.mag), self.db)
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| roryyorke/python-control | control/tests/ctrlutil_test.py | Python | bsd-3-clause | 2,021 |
#!/usr/bin/env python
# This script plots a polygon created from points.
import pdb
import sys
import warnings
import numpy as np
from cornish import ASTPolygon
from cornish import ASTICRSFrame, ASTFrameSet, ASTBox, ASTFITSChannel, ASTCircle, ASTCompoundRegion
import astropy.units as u
from astropy.io import fits
import matplotlib.pyplot as plt
import starlink.Grf as Grf
import starlink.Ast as Ast
import starlink.Atl as Atl
# Vertices of the footprint polygon as (ra, dec) pairs in degrees (ICRS).
points = np.array([[ 24.9220814, -2.32553877e-01],
                   [ 24.8690619, -2.13198227e-01],
                   [ 24.8080071, -1.56379062e-01],
                   [ 24.7961841, -1.32038075e-01],
                   [ 24.7603950, -3.85297093e-02],
                   [ 24.7542463, 1.17204538e-02],
                   [ 24.7542463, 1.17204538e-02],
                   [ 24.7769168, 8.05598701e-02],
                   [ 24.8216773, 1.66088007e-01],
                   [ 24.8332202, 1.83192204e-01],
                   [ 24.8724133, 2.11948177e-01],
                   [ 24.9190898, 2.36432081e-01],
                   [ 25.0443598, 2.38795506e-01],
                   [ 25.0694520, 2.34736352e-01],
                   [ 25.1083355, 2.26095876e-01],
                   [ 25.1263480, 2.15632984e-01],
                   [ 25.1730281, 1.80042404e-01],
                   [ 25.2055145, 1.40829694e-01],
                   [ 25.2459929, 1.54015514e-02],
                   [ 25.2459929, 1.54015514e-02],
                   [ 25.2426565, -3.86550958e-02],
                   [ 25.2219908, -9.67569213e-02],
                   [ 25.1986233, -1.49820068e-01],
                   [ 25.0872686, -2.32297073e-01]])

# Build the sky regions: the polygon footprint, a GALEX field circle, and
# their combination (Ast.AND -> intersection of the two regions).
icrs_frame = ASTICRSFrame()
polygon = ASTPolygon(frame=icrs_frame, points=points)
galex_circle = ASTCircle(frame=icrs_frame, center=[24.617269485878584,0.2727299618460874], radius=1.1312250143591236)
compound_region = ASTCompoundRegion(regions=[polygon, galex_circle], operation=Ast.AND)

# define the extend of the plot
#bounding_circle = polygon.boundingCircle()
bounding_circle = compound_region.boundingCircle()

# -------------------------------------------------------
# Create frame set that will map the position in the plot
# (i.e. pixel coordinates) to the sky (i.e. WCS)
fits_chan = ASTFITSChannel()

# Minimal FITS WCS header for a 100x100 pixel gnomonic (TAN) projection
# centered on the bounding circle; CDELT is chosen so the circle spans the
# image with a small margin (2.1 x radius across 100 pixels).
cards = {
    "CRVAL1":bounding_circle.center[0], # reference point (image center) in sky coords
    "CRVAL2":bounding_circle.center[1],
    "CTYPE1":"RA---TAN", #"GLON-TAN", # projection type
    "CTYPE2":"DEC--TAN", #"GLAT-TAN",
    "CRPIX1":50.5, # reference point (image center) point in pixel coords
    "CRPIX2":50.5,
    "CDELT1":2.1*bounding_circle.radius.to_value(u.deg)/100,
    "CDELT2":2.1*bounding_circle.radius.to_value(u.deg)/100,
    "NAXIS1":100,
    "NAXIS2":100,
    "NAXES":2,
}
print(cards)
naxis1 = cards['NAXIS1']
naxis2 = cards['NAXIS2']
pix2sky_mapping = ASTFrameSet.fromFITSHeader(fits_header=cards)
# -------------------------------------------------------

#pix2sky_mapping.system = "Galactic"

print(bounding_circle.center)

# Create a matplotlib figure, 12x12 inches in size.
dx=12.0
dy=12.0
fig = plt.figure( figsize=(dx,dy) )
fig_aspect_ratio = dy/dx

# Set up the bounding box of the image in pixel coordinates, and get
# the aspect ratio of the image.
naxis1 = int(cards["NAXIS1"])
naxis2 = int(cards["NAXIS2"])
bbox = (0.5, 0.5, naxis1 + 0.5, naxis2 + 0.5)
fits_aspect_ratio = ( bbox[3] - bbox[1] )/( bbox[2] - bbox[0] )
#fits_aspect_ratio = 1

# Set up the bounding box of the image as fractional offsets within the
# figure. The hx and hy variables hold the horizontal and vertical half
# widths of the image, as fractions of the width and height of the figure.
# Shrink the image area by a factor of 0.7 to leave room for annotated axes.
if fig_aspect_ratio > fits_aspect_ratio :
  hx = 0.5
  hy = 0.5*fits_aspect_ratio/fig_aspect_ratio
else:
  hx = 0.5*fig_aspect_ratio/fits_aspect_ratio
  hy = 0.5

hx *= 0.7
hy *= 0.7
gbox = ( 0.5 - hx, 0.5 - hy, 0.5 + hx, 0.5 + hy )

# Add an Axes structure to the figure and display the image within it,
# scaled between data values zero and 100. Suppress the axes as we will
# be using AST to create axes.
ax_image = fig.add_axes( [ gbox[0], gbox[1], gbox[2] - gbox[0],
                           gbox[3] - gbox[1] ], zorder=1 )
ax_image.xaxis.set_visible( False )
ax_image.yaxis.set_visible( False )
#ax_image.imshow( hdu_list[0].data, vmin=0, vmax=200, cmap=plt.cm.gist_heat,
#                 origin='lower', aspect='auto')

# Add another Axes structure to the figure to hold the annotated axes
# produced by AST. It is displayed on top of the previous Axes
# structure. Make it transparent so that the image will show through.
ax_plot = fig.add_axes( [ 0, 0, 1, 1 ], zorder=2 )
ax_plot.xaxis.set_visible(False)
ax_plot.yaxis.set_visible(False)
ax_plot.patch.set_alpha(0.0)

# Create a drawing object that knows how to draw primitives
# (lines, marks and strings) into this second Axes structure.
grf = Grf.grf_matplotlib( ax_plot )

#print(f"gbox: {gbox}")
#print(f"bbox: {bbox}")

# box in graphics coordinates (area to draw on, dim of plot)
#plot = Ast.Plot( frameset.astObject, gbox, bbox, grf )
plot = Ast.Plot( pix2sky_mapping.astObject, gbox, bbox, grf, options="Uni1=ddd:mm:ss" )
#, options="Grid=1" )
#plot.set( "Colour(border)=2, Font(textlab)=3" );

plot.Grid = True # can change the line properties
plot.Format_1 = "dms"

# colors:
# 1 = black
# 2 = red
# 3 = lime
# 4 = blue
# 5 =
# 6 = pink

plot.grid()
plot.Width_Border = 2

#plot.Colour_Border = "#0099cc"
#plot.regionoutline(bounding_circle.astObject)

# Draw each region outline in a distinct colour; the bounding circle is
# drawn dashed (Style=3) in red.
plot.Colour_Border = "#106942"
plot.regionoutline(polygon.astObject)

plot.Colour_Border = "blue"
plot.regionoutline(galex_circle.astObject)
#plt.plot(galex_circle.center[0],galex_circle.center[1],'ro')

plot.Colour_Border = "red"
plot.Style = 3
plot.regionoutline(bounding_circle.astObject)

plt.show()
| demitri/cornish | examples/polygon_plot.py | Python | mit | 5,417 |
# -- coding: utf-8 --
#-------------------------------------------------------------------------------
# Name: umsl_edu
# Purpose: University of Missouri - St Louis
#
# Author: Ramakrishna
#
# Dated: 11/Apr/2016
# Copyright: (c) Ramakrishna 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import requests, re, os, csv
from lxml import html
import socks, socket
from collections import OrderedDict
from queue import Queue
from threading import Thread
# Route all socket traffic through a local SOCKS5 proxy (127.0.0.1:9150 is
# the Tor Browser listener); monkey-patching socket.socket makes requests
# use it transparently.
socks.setdefaultproxy(proxy_type=socks.PROXY_TYPE_SOCKS5, addr="127.0.0.1", port=9150)
socket.socket = socks.socksocket

# Directory-search endpoint and a desktop browser User-Agent.
url = 'https://apps.umsl.edu/webapps/ITS/DirectorySearch/Search.cfm'
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'}
def search(term):
    """Query the UMSL student directory for *term* and append hits to CSV.

    The trailing newline is stripped from *term* before posting; on
    success the raw term (newline included) is appended to the
    ``umsl_terms`` progress file so an interrupted run can be resumed.
    """
    try:
        session = requests.session()
        payload = {'LastName': term.replace("\n", ""),
                   'dept': '',
                   'div': '',
                   'divname': '',
                   'saveForm': 'Search',
                   'um_type': 'S'}
        response = session.post(url, headers=headers, data=payload)
        tree = html.fromstring(response.content)
        result_rows = tree.xpath("//table[@id='myResults']//tr")

        records = []
        # Skip the header row at index 0.
        for entry in result_rows[1:]:
            cell_text = "$$$".join(
                entry.xpath("*//text()[normalize-space()]")
            ).replace("\r\n\t", "").replace("\t", "").strip()
            record = OrderedDict()
            try:
                # Exactly two fields are expected; anything else is skipped.
                name, email = cell_text.split("$$$")
                record['name'] = name
                record['email'] = email
            except:
                continue
            records.append(record)

        if records:
            file_exists = os.path.isfile('umsl_edu.csv')
            with open('umsl_edu.csv', 'a', newline='', encoding='utf-8') as outfile:
                writer = csv.DictWriter(outfile, records[0].keys())
                if not file_exists:
                    # Write the header only when creating the file.
                    writer.writeheader()
                writer.writerows(records)

        # Mark the term as processed even when it returned no rows.
        with open('umsl_terms', 'a') as progress:
            progress.write(term)
    except Exception as e:
        print(e.__doc__)
        print(e.args)
        return None
class Worker(Thread):
    """Worker thread that consumes search terms from a shared queue."""

    def __init__(self, queue):
        Thread.__init__(self)
        self.queue = queue

    def run(self):
        # Loop forever; the process exits when the queue is drained because
        # the workers are started as daemon threads.
        while True:
            job = self.queue.get()
            search(job)
            self.queue.task_done()
def main():
    """Load search terms, drop finished ones, and fan out to 16 workers."""
    try:
        pending = set(open('twochars').readlines())
        # Resume support: skip terms already logged by earlier runs.
        if os.path.isfile('umsl_terms'):
            pending -= set(open('umsl_terms').readlines())
        pending = list(pending)

        work_queue = Queue()
        for _ in range(16):
            thread = Worker(work_queue)
            thread.daemon = True
            thread.start()

        for term in pending:
            work_queue.put(term)
        # Block until every queued term has been processed.
        work_queue.join()
    except Exception as e:
        print(e.__doc__)
        print(e.args)

if __name__ == '__main__':
    main()
| brkrishna/freelance | univs/archives/umsl_edu.py | Python | gpl-2.0 | 2,625 |
"""Probit regression class and diagnostics."""
__author__ = "Luc Anselin luc.anselin@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu"
import numpy as np
import numpy.linalg as la
import scipy.optimize as op
from scipy.stats import norm, chisqprob
import scipy.sparse as SP
import user_output as USER
import summary_output as SUMMARY
from utils import spdot, spbroadcast
__all__ = ["Probit"]
class BaseProbit(object):

    """
    Probit class to do all the computations

    Parameters
    ----------
    x : array
        nxk array of independent variables (assumed to be aligned with y)
    y : array
        nx1 array of dependent binary variable
    w : W
        PySAL weights instance or spatial weights sparse matrix
        aligned with y
    optim : string
        Optimization method.
        Default: 'newton' (Newton-Raphson).
        Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
    scalem : string
        Method to calculate the scale of the marginal effects.
        Default: 'phimean' (Mean of individual marginal effects)
        Alternative: 'xmean' (Marginal effects at variables mean)
    maxiter : int
        Maximum number of iterations until optimizer stops

    Attributes
    ----------
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
    y : array
        nx1 array of dependent variable
    betas : array
        kx1 array with estimated coefficients
    predy : array
        nx1 array of predicted y values
    n : int
        Number of observations
    k : int
        Number of variables
    vm : array
        Variance-covariance matrix (kxk)
    z_stat : list of tuples
        z statistic; each tuple contains the pair (statistic,
        p-value), where each is a float
    xmean : array
        Mean of the independent variables (kx1)
    predpc : float
        Percent of y correctly predicted
    logl : float
        Log-Likelihood of the estimation
    scalem : string
        Method to calculate the scale of the marginal effects.
    scale : float
        Scale of the marginal effects.
    slopes : array
        Marginal effects of the independent variables (k-1x1)
        Note: Disregards the presence of dummies.
    slopes_vm : array
        Variance-covariance matrix of the slopes (k-1xk-1)
    LR : tuple
        Likelihood Ratio test of all coefficients = 0
        (test statistics, p-value)
    Pinkse_error : tuple
        Lagrange Multiplier test against spatial error correlation,
        as (statistic, p-value). Implemented as presented in [Pinkse2004]_
    KP_error : tuple
        Moran's I type test against spatial error correlation,
        as (statistic, p-value). Implemented as presented in [Kelejian2001]_
    PS_error : tuple
        Lagrange Multiplier test against spatial error correlation,
        as (statistic, p-value). Implemented as presented in [Pinkse1998]_
    warning : boolean
        if True Maximum number of iterations exceeded or gradient
        and/or function calls not changing.

    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array([dbf.by_col('CRIME')]).T
    >>> x = np.array([dbf.by_col('INC'), dbf.by_col('HOVAL')]).T
    >>> x = np.hstack((np.ones(y.shape),x))
    >>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
    >>> w.transform='r'
    >>> model = BaseProbit((y>40).astype(float), x, w=w)
    >>> np.around(model.betas, decimals=6)
    array([[ 3.353811],
           [-0.199653],
           [-0.029514]])

    >>> np.around(model.vm, decimals=6)
    array([[ 0.852814, -0.043627, -0.008052],
           [-0.043627,  0.004114, -0.000193],
           [-0.008052, -0.000193,  0.00031 ]])

    >>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
    >>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
    >>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
    >>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
    [['Pinkse_error' '3.131719' '0.076783']
     ['KP_error' '1.721312' '0.085194']
     ['PS_error' '2.558166' '0.109726']]
    """

    def __init__(self, y, x, w=None, optim='newton', scalem='phimean', maxiter=100):
        self.y = y
        self.x = x
        self.n, self.k = x.shape
        self.optim = optim
        self.scalem = scalem
        self.w = w
        self.maxiter = maxiter
        # Fit immediately: betas and the log-likelihood are plain attributes,
        # everything else below is derived lazily.
        par_est, self.warning = self.par_est()
        self.betas = np.reshape(par_est[0], (self.k, 1))
        self.logl = -float(par_est[1])

    # ------------------------------------------------------------------
    # Caching infrastructure.
    #
    # Every derived statistic is exposed as a property that is computed
    # lazily on first access, memoized in self._cache, and overridable
    # through its setter. The two helpers below replace the identical
    # try/except AttributeError/KeyError boilerplate that used to be
    # copy-pasted into every property (with the computation duplicated in
    # both branches).
    # ------------------------------------------------------------------

    def _cache_get(self, key, compute):
        """Return self._cache[key], running compute() first if it is absent.

        compute() must store its result(s) into self._cache directly; some
        computations (the spatial diagnostics) fill several keys at once.
        If compute() does not set `key` (e.g. an unknown scalem value), the
        final lookup raises KeyError, matching the previous behavior.
        """
        try:
            cache = self._cache
        except AttributeError:
            # First access ever: create the cache dict.
            cache = self._cache = {}
        if key not in cache:
            compute()
        return cache[key]

    def _cache_set(self, key, val):
        """Store val under key, creating the cache dict if necessary."""
        try:
            self._cache[key] = val
        except AttributeError:
            self._cache = {key: val}

    @property
    def vm(self):
        # Variance-covariance matrix: negative inverse Hessian at the MLE.
        def compute():
            H = self.hessian(self.betas)
            self._cache['vm'] = -la.inv(H)
        return self._cache_get('vm', compute)

    @vm.setter
    def vm(self, val):
        self._cache_set('vm', val)

    @property
    def z_stat(self):
        # z statistics and two-sided normal p-values for the coefficients.
        def compute():
            variance = self.vm.diagonal()
            zStat = self.betas.reshape(len(self.betas),) / np.sqrt(variance)
            rs = {}
            for i in range(len(self.betas)):
                rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2)
            self._cache['z_stat'] = rs.values()
        return self._cache_get('z_stat', compute)

    @z_stat.setter
    def z_stat(self, val):
        self._cache_set('z_stat', val)

    @property
    def slopes_std_err(self):
        # Standard errors of the marginal effects.
        def compute():
            self._cache['slopes_std_err'] = np.sqrt(self.slopes_vm.diagonal())
        return self._cache_get('slopes_std_err', compute)

    @slopes_std_err.setter
    def slopes_std_err(self, val):
        self._cache_set('slopes_std_err', val)

    @property
    def slopes_z_stat(self):
        # z statistics and two-sided p-values for the marginal effects.
        def compute():
            zStat = self.slopes.reshape(
                len(self.slopes),) / self.slopes_std_err
            rs = {}
            for i in range(len(self.slopes)):
                rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2)
            self._cache['slopes_z_stat'] = rs.values()
        return self._cache_get('slopes_z_stat', compute)

    @slopes_z_stat.setter
    def slopes_z_stat(self, val):
        self._cache_set('slopes_z_stat', val)

    @property
    def xmean(self):
        # Column means of x as a kx1 array.
        def compute():
            try:
                # Dense x: column sums reshape directly.
                self._cache['xmean'] = np.reshape(sum(self.x) / self.n, (self.k, 1))
            except:
                # Sparse x: the summed matrix must be densified first.
                self._cache['xmean'] = np.reshape(sum(self.x).toarray() / self.n, (self.k, 1))
        return self._cache_get('xmean', compute)

    @xmean.setter
    def xmean(self, val):
        self._cache_set('xmean', val)

    @property
    def xb(self):
        # Linear predictor x*beta.
        def compute():
            self._cache['xb'] = spdot(self.x, self.betas)
        return self._cache_get('xb', compute)

    @xb.setter
    def xb(self, val):
        self._cache_set('xb', val)

    @property
    def predy(self):
        # Predicted probabilities Phi(x*beta).
        def compute():
            self._cache['predy'] = norm.cdf(self.xb)
        return self._cache_get('predy', compute)

    @predy.setter
    def predy(self, val):
        self._cache_set('predy', val)

    @property
    def predpc(self):
        # Percentage of observations predicted correctly (|y - p| <= 0.5).
        def compute():
            predpc = abs(self.y - self.predy)
            for i in range(len(predpc)):
                if predpc[i] > 0.5:
                    predpc[i] = 0
                else:
                    predpc[i] = 1
            self._cache['predpc'] = float(100.0 * np.sum(predpc) / self.n)
        return self._cache_get('predpc', compute)

    @predpc.setter
    def predpc(self, val):
        self._cache_set('predpc', val)

    @property
    def phiy(self):
        # Standard normal density evaluated at x*beta.
        def compute():
            self._cache['phiy'] = norm.pdf(self.xb)
        return self._cache_get('phiy', compute)

    @phiy.setter
    def phiy(self, val):
        self._cache_set('phiy', val)

    @property
    def scale(self):
        # Scale factor for the marginal effects; a scalem value other than
        # 'phimean'/'xmean' leaves the key unset and raises KeyError.
        def compute():
            if self.scalem == 'phimean':
                self._cache['scale'] = float(1.0 * np.sum(self.phiy) / self.n)
            elif self.scalem == 'xmean':
                self._cache['scale'] = float(norm.pdf(np.dot(self.xmean.T, self.betas)))
        return self._cache_get('scale', compute)

    @scale.setter
    def scale(self, val):
        self._cache_set('scale', val)

    @property
    def slopes(self):
        # Marginal effects (constant excluded).
        def compute():
            self._cache['slopes'] = self.betas[1:] * self.scale
        return self._cache_get('slopes', compute)

    @slopes.setter
    def slopes(self, val):
        self._cache_set('slopes', val)

    @property
    def slopes_vm(self):
        # Delta-method variance-covariance matrix of the marginal effects.
        def compute():
            x = self.xmean
            b = self.betas
            dfdb = np.eye(self.k) - spdot(b.T, x) * spdot(b, x.T)
            slopes_vm = (self.scale ** 2) * \
                np.dot(np.dot(dfdb, self.vm), dfdb.T)
            self._cache['slopes_vm'] = slopes_vm[1:, 1:]
        return self._cache_get('slopes_vm', compute)

    @slopes_vm.setter
    def slopes_vm(self, val):
        self._cache_set('slopes_vm', val)

    @property
    def LR(self):
        # Likelihood-ratio test against the intercept-only model.
        def compute():
            P = 1.0 * np.sum(self.y) / self.n
            LR = float(
                -2 * (self.n * (P * np.log(P) + (1 - P) * np.log(1 - P)) - self.logl))
            self._cache['LR'] = (LR, chisqprob(LR, self.k))
        return self._cache_get('LR', compute)

    @LR.setter
    def LR(self, val):
        self._cache_set('LR', val)

    @property
    def u_naive(self):
        # Naive residuals y - Phi(x*beta).
        def compute():
            self._cache['u_naive'] = self.y - self.predy
        return self._cache_get('u_naive', compute)

    @u_naive.setter
    def u_naive(self, val):
        self._cache_set('u_naive', val)

    @property
    def u_gen(self):
        # Generalized residuals phi(xb) * u_naive / (Phi(xb)(1 - Phi(xb))).
        def compute():
            Phi_prod = self.predy * (1 - self.predy)
            self._cache['u_gen'] = self.phiy * (self.u_naive / Phi_prod)
        return self._cache_get('u_gen', compute)

    @u_gen.setter
    def u_gen(self, val):
        self._cache_set('u_gen', val)

    def _compute_sp_tests(self):
        """Run all three spatial-error diagnostics and cache their results."""
        self._cache['Pinkse_error'], self._cache['KP_error'], \
            self._cache['PS_error'] = sp_tests(self)

    @property
    def Pinkse_error(self):
        return self._cache_get('Pinkse_error', self._compute_sp_tests)

    @Pinkse_error.setter
    def Pinkse_error(self, val):
        self._cache_set('Pinkse_error', val)

    @property
    def KP_error(self):
        return self._cache_get('KP_error', self._compute_sp_tests)

    @KP_error.setter
    def KP_error(self, val):
        self._cache_set('KP_error', val)

    @property
    def PS_error(self):
        return self._cache_get('PS_error', self._compute_sp_tests)

    @PS_error.setter
    def PS_error(self, val):
        self._cache_set('PS_error', val)

    def par_est(self):
        """Maximize the log-likelihood with the configured optimizer.

        Returns a tuple (optimizer output, warning flag); the flag is True
        when the optimizer hit maxiter or stalled.
        """
        # OLS estimates serve as starting values.
        start = np.dot(la.inv(spdot(self.x.T, self.x)),
                       spdot(self.x.T, self.y))
        flogl = lambda par: -self.ll(par)
        if self.optim == 'newton':
            fgrad = lambda par: self.gradient(par)
            fhess = lambda par: self.hessian(par)
            par_hat = newton(flogl, start, fgrad, fhess, self.maxiter)
            warn = par_hat[2]
        else:
            fgrad = lambda par: -self.gradient(par)
            if self.optim == 'bfgs':
                par_hat = op.fmin_bfgs(
                    flogl, start, fgrad, full_output=1, disp=0)
                warn = par_hat[6]
            if self.optim == 'ncg':
                fhess = lambda par: -self.hessian(par)
                par_hat = op.fmin_ncg(
                    flogl, start, fgrad, fhess=fhess, full_output=1, disp=0)
                warn = par_hat[5]
        if warn > 0:
            warn = True
        else:
            warn = False
        return par_hat, warn

    def ll(self, par):
        """Probit log-likelihood at the parameter vector par."""
        beta = np.reshape(np.array(par), (self.k, 1))
        # q in {-1, 1} encodes the binary outcome.
        q = 2 * self.y - 1
        qxb = q * spdot(self.x, beta)
        ll = sum(np.log(norm.cdf(qxb)))
        return ll

    def gradient(self, par):
        """Gradient (score) of the log-likelihood at par (1xk)."""
        beta = np.reshape(np.array(par), (self.k, 1))
        q = 2 * self.y - 1
        qxb = q * spdot(self.x, beta)
        # Inverse Mills ratio terms.
        lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
        gradient = spdot(lamb.T, self.x)[0]
        return gradient

    def hessian(self, par):
        """Hessian of the log-likelihood at par (kxk)."""
        beta = np.reshape(np.array(par), (self.k, 1))
        q = 2 * self.y - 1
        xb = spdot(self.x, beta)
        qxb = q * xb
        lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
        hessian = spdot(self.x.T, spbroadcast(self.x, -lamb * (lamb + xb)))
        return hessian
class Probit(BaseProbit):
    """
    Classic non-spatial Probit and spatial diagnostics. The class includes a
    printout that formats all the results and tests in a nice format.
    The diagnostics for spatial dependence currently implemented are:
    * Pinkse Error [Pinkse2004]_
    * Kelejian and Prucha Moran's I [Kelejian2001]_
    * Pinkse & Slade Error [Pinkse1998]_
    Parameters
    ----------
    x : array
        nxk array of independent variables (assumed to be aligned with y)
    y : array
        nx1 array of dependent binary variable
    w : W
        PySAL weights instance aligned with y
    optim : string
        Optimization method.
        Default: 'newton' (Newton-Raphson).
        Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
    scalem : string
        Method to calculate the scale of the marginal effects.
        Default: 'phimean' (Mean of individual marginal effects)
        Alternative: 'xmean' (Marginal effects at variables mean)
    maxiter : int
        Maximum number of iterations until optimizer stops
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    Attributes
    ----------
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
    y : array
        nx1 array of dependent variable
    betas : array
        kx1 array with estimated coefficients
    predy : array
        nx1 array of predicted y values
    n : int
        Number of observations
    k : int
        Number of variables
    vm : array
        Variance-covariance matrix (kxk)
    z_stat : list of tuples
        z statistic; each tuple contains the pair (statistic,
        p-value), where each is a float
    xmean : array
        Mean of the independent variables (kx1)
    predpc : float
        Percent of y correctly predicted
    logl : float
        Log-Likelihhod of the estimation
    scalem : string
        Method to calculate the scale of the marginal effects.
    scale : float
        Scale of the marginal effects.
    slopes : array
        Marginal effects of the independent variables (k-1x1)
    slopes_vm : array
        Variance-covariance matrix of the slopes (k-1xk-1)
    LR : tuple
        Likelihood Ratio test of all coefficients = 0
        (test statistics, p-value)
    Pinkse_error: float
        Lagrange Multiplier test against spatial error correlation.
        Implemented as presented in [Pinkse2004]_
    KP_error : float
        Moran's I type test against spatial error correlation.
        Implemented as presented in [Kelejian2001]_
    PS_error : float
        Lagrange Multiplier test against spatial error correlation.
        Implemented as presented in [Pinkse1998]_
    warning : boolean
        if True Maximum number of iterations exceeded or gradient
        and/or function calls not changing.
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    title : string
        Name of the regression method used
    Examples
    --------
    We first need to import the needed modules, namely numpy to convert the
    data we read into arrays that ``spreg`` understands and ``pysal`` to
    perform all the analysis.
    >>> import numpy as np
    >>> import pysal
    Open data on Columbus neighborhood crime (49 areas) using pysal.open().
    This is the DBF associated with the Columbus shapefile. Note that
    pysal.open() also reads data in CSV format; since the actual class
    requires data to be passed in as numpy arrays, the user can read their
    data in using any method.
    >>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    Extract the CRIME column (crime) from the DBF file and make it the
    dependent variable for the regression. Note that PySAL requires this to be
    an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
    that other packages accept. Since we want to run a probit model and for this
    example we use the Columbus data, we also need to transform the continuous
    CRIME variable into a binary variable. As in [McMillen1992]_, we define
    y = 1 if CRIME > 40.
    >>> y = np.array([dbf.by_col('CRIME')]).T
    >>> y = (y>40).astype(float)
    Extract HOVAL (home values) and INC (income) vectors from the DBF to be used as
    independent variables in the regression. Note that PySAL requires this to
    be an nxj numpy array, where j is the number of independent variables (not
    including a constant). By default this class adds a vector of ones to the
    independent variables passed in.
    >>> names_to_extract = ['INC', 'HOVAL']
    >>> x = np.array([dbf.by_col(name) for name in names_to_extract]).T
    Since we want to the test the probit model for spatial dependence, we need to
    specify the spatial weights matrix that includes the spatial configuration of
    the observations into the error component of the model. To do that, we can open
    an already existing gal file or create a new one. In this case, we will use
    ``columbus.gal``, which contains contiguity relationships between the
    observations in the Columbus dataset we are using throughout this example.
    Note that, in order to read the file, not only to open it, we need to
    append '.read()' at the end of the command.
    >>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
    Unless there is a good reason not to do it, the weights have to be
    row-standardized so every row of the matrix sums to one. In PySAL, this
    can be easily performed in the following way:
    >>> w.transform='r'
    We are all set with the preliminaries, we are good to run the model. In this
    case, we will need the variables and the weights matrix. If we want to
    have the names of the variables printed in the output summary, we will
    have to pass them in as well, although this is optional.
    >>> model = Probit(y, x, w=w, name_y='crime', name_x=['income','home value'], name_ds='columbus', name_w='columbus.gal')
    Once we have run the model, we can explore a little bit the output. The
    regression object we have created has many attributes so take your time to
    discover them.
    >>> np.around(model.betas, decimals=6)
    array([[ 3.353811],
           [-0.199653],
           [-0.029514]])
    >>> np.around(model.vm, decimals=6)
    array([[ 0.852814, -0.043627, -0.008052],
           [-0.043627,  0.004114, -0.000193],
           [-0.008052, -0.000193,  0.00031 ]])
    Since we have provided a spatial weigths matrix, the diagnostics for
    spatial dependence have also been computed. We can access them and their
    p-values individually:
    >>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
    >>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
    >>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
    >>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
    [['Pinkse_error' '3.131719' '0.076783']
     ['KP_error' '1.721312' '0.085194']
     ['PS_error' '2.558166' '0.109726']]
    Or we can easily obtain a full summary of all the results nicely formatted and
    ready to be printed simply by typing 'print model.summary'
    """
    def __init__(
        self, y, x, w=None, optim='newton', scalem='phimean', maxiter=100,
        vm=False, name_y=None, name_x=None, name_w=None, name_ds=None,
        spat_diag=False):
        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        # Identity check: pysal W objects may define rich comparisons, so
        # "w != None" is unreliable; "is not None" is the correct test.
        if w is not None:
            USER.check_weights(w, y)
            # A weights matrix implies the user wants the spatial diagnostics.
            spat_diag = True
            ws = w.sparse
        else:
            ws = None
        x_constant = USER.check_constant(x)
        BaseProbit.__init__(self, y=y, x=x_constant, w=ws,
                            optim=optim, scalem=scalem, maxiter=maxiter)
        self.title = "CLASSIC PROBIT ESTIMATOR"
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_w = USER.set_name_w(name_w, w)
        SUMMARY.Probit(reg=self, w=w, vm=vm, spat_diag=spat_diag)
def newton(flogl, start, fgrad, fhess, maxiter):
    """
    Newton-Raphson optimizer for the probit log-likelihood.
    Parameters
    ----------
    flogl : lambda
        Function to calculate the log-likelihood
    start : array
        kx1 array of starting values
    fgrad : lambda
        Function to calculate the gradient
    fhess : lambda
        Function to calculate the hessian
    maxiter : int
        Maximum number of iterations until optimizer stops
    Returns (estimate, flogl(estimate), warn) where warn is 1 when the
    iteration budget was exhausted.
    """
    estimate = start
    steps = 0
    improvement = 1
    while steps < maxiter and improvement >= 1e-04:
        scaled_inv_hess = -la.inv(fhess(estimate))
        grad = fgrad(estimate).reshape(start.shape)
        update = np.dot(scaled_inv_hess, grad)
        estimate = estimate + update
        steps += 1
        # Quadratic form g'(H^-1)g used as the convergence measure.
        improvement = np.dot(grad.T, update)
    exhausted = 1 if steps == maxiter else 0
    return (estimate, flogl(estimate), exhausted)
def sp_tests(reg):
    """
    Calculates tests for spatial dependence in Probit models
    Parameters
    ----------
    reg : regression object
        output instance from a probit model
    Returns
    -------
    tuple (LM_err, moran, ps): each is a 2-element array holding a test
    statistic and its p-value.
    Raises
    ------
    Exception when reg.w is None, since every test requires a weights matrix.
    """
    # Identity check: W objects may define rich comparisons, so avoid != None.
    if reg.w is not None:
        try:
            w = reg.w.sparse
        except AttributeError:
            # Already a raw sparse matrix; use it as-is.
            w = reg.w
        Phi = reg.predy
        phi = reg.phiy
        # Pinkse_error:
        Phi_prod = Phi * (1 - Phi)
        u_naive = reg.u_naive
        u_gen = reg.u_gen
        sig2 = np.sum((phi * phi) / Phi_prod) / reg.n
        LM_err_num = np.dot(u_gen.T, (w * u_gen)) ** 2
        trWW = np.sum((w * w).diagonal())
        trWWWWp = trWW + np.sum((w * w.T).diagonal())
        LM_err = float(1.0 * LM_err_num / (sig2 ** 2 * trWWWWp))
        LM_err = np.array([LM_err, chisqprob(LM_err, 1)])
        # KP_error:
        moran = moran_KP(reg.w, u_naive, Phi_prod)
        # Pinkse-Slade_error:
        u_std = u_naive / np.sqrt(Phi_prod)
        ps_num = np.dot(u_std.T, (w * u_std)) ** 2
        trWpW = np.sum((w.T * w).diagonal())
        ps = float(ps_num / (trWW + trWpW))
        # chi-square instead of bootstrap.
        ps = np.array([ps, chisqprob(ps, 1)])
    else:
        # Parenthesized raise is valid on both Python 2 and 3; the previous
        # "raise Exception, msg" form is Python-2-only syntax.
        raise Exception("W matrix must be provided to calculate spatial tests.")
    return LM_err, moran, ps
def moran_KP(w, u, sig2i):
    """
    Calculates Moran-flavoured tests
    Parameters
    ----------
    w : W
        PySAL weights instance (or an already-sparse matrix) aligned with y
    u : array
        nx1 array of naive residuals
    sig2i : array
        nx1 array of individual variance
    Returns
    -------
    2-element array: the standardized Moran statistic and its two-sided
    normal p-value.
    """
    # Accept either a pysal W (which exposes .sparse) or a raw sparse matrix;
    # the previous bare "except" silently swallowed unrelated errors.
    try:
        w = w.sparse
    except AttributeError:
        pass
    moran_num = np.dot(u.T, (w * u))
    # Sparse diagonal matrix of the individual variances.
    E = SP.lil_matrix(w.get_shape())
    E.setdiag(sig2i.flat)
    E = E.asformat('csr')
    WE = w * E
    moran_den = np.sqrt(np.sum((WE * WE + (w.T * E) * WE).diagonal()))
    # moran_num is a (1, 1) array; extract the scalar explicitly instead of
    # relying on float(ndarray), which is deprecated for ndim > 0 arrays.
    moran = float(1.0 * np.asarray(moran_num).flat[0] / moran_den)
    # Two-sided p-value under the standard normal.
    moran = np.array([moran, norm.sf(abs(moran)) * 2.])
    return moran
def _test():
    """Run the module doctests with scientific-notation suppression enabled."""
    import doctest
    previous_suppress = np.get_printoptions()['suppress']
    np.set_printoptions(suppress=True)
    doctest.testmod()
    # Restore whatever print options the caller had.
    np.set_printoptions(suppress=previous_suppress)
if __name__ == '__main__':
    _test()
    # Smoke-run the estimator on the Columbus sample data shipped with PySAL.
    import numpy as np
    import pysal
    dbf = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
    y = np.array([dbf.by_col('CRIME')]).T
    var_x = ['INC', 'HOVAL']
    x = np.array([dbf.by_col(name) for name in var_x]).T
    w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
    # Row-standardize the weights so each row sums to one.
    w.transform = 'r'
    # Binarize CRIME at 40, matching the class docstring example.
    probit1 = Probit(
        (y > 40).astype(float), x, w=w, name_x=var_x, name_y="CRIME",
        name_ds="Columbus", name_w="columbus.dbf")
    print probit1.summary
| pastephens/pysal | pysal/spreg/probit.py | Python | bsd-3-clause | 34,383 |
from flask import render_template
from . import main
from .. import login_manager
from ..models import User, Bookmark, Tag
@login_manager.user_loader
def load_user(userid):
    """Flask-Login hook: resolve a session's stored user id to a User row."""
    numeric_id = int(userid)
    return User.query.get(numeric_id)
@main.route('/')
def index():
    """Render the landing page with the five most recent bookmarks."""
    recent = Bookmark.newest(5)
    return render_template('index.html', new_bookmarks=recent)
@main.app_errorhandler(403)
def forbidden(e):
    """Render the custom 403 page together with its status code."""
    body = render_template('403.html')
    return body, 403
@main.app_errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page together with its status code."""
    body = render_template('404.html')
    return body, 404
@main.app_errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 page together with its status code."""
    body = render_template('500.html')
    return body, 500
@main.app_context_processor
def inject_tags():
    """Expose the Tag.all callable to every template as ``all_tags``."""
    return {'all_tags': Tag.all}
| iral/thermos-Flask | thermos/main/views.py | Python | gpl-2.0 | 699 |
import time
import logging
from yoci.travis.travis import Builds
from yoci.travis.travis import Job
# Module-level logger emitting INFO and above to the console with timestamps.
lgr = logging.getLogger('travis_func_api')
lgr.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s-%(name)s[%(levelname)s] - '
                              '%(message)s')
console_handler.setFormatter(formatter)
lgr.addHandler(console_handler)
class TimeoutError(Exception):
    """Raised when polling Travis exceeds the caller's deadline.
    NOTE(review): this shadows Python 3's builtin TimeoutError -- confirm
    callers expect the module-local type.
    """
    pass
def _wait_for_build_state(build_id, end):
    """
    Poll Travis until the build reaches a final state or the deadline passes.
    :param build_id: Travis build id to poll.
    :param end: absolute epoch time (seconds) at which to give up.
    :return: the final build dict, or None when the deadline expires.
    """
    builds = Builds(lgr)
    lgr.info('Waiting for build with ID {0} to reach final state for {1} '
             'seconds.'.format(build_id, int(end-time.time())))
    # Bug fix: previously ``build`` was only assigned inside the loop, so a
    # deadline that had already passed raised NameError in the final warning.
    build = None
    while time.time() < end:
        build = builds.show_build(id=build_id)
        state = build['state']
        if state != 'passed' and state != 'failed' and state != 'errored':
            lgr.info(
                'Build still in progress. Waiting for build process to end. '
                'Current build state is: ' + state)
            time.sleep(10)
        else:
            lgr.info('Build matching ID {0} has reached a final state: {1}'
                     .format(build_id, state))
            return build
    lgr.warn('Failed waiting for build state to reach passed/failed. Build '
             'state is {0}'.format(build['state'] if build else 'unknown'))
def _wait_for_commit(repo_name, branch_name, sha_id, end):
    """Poll Travis until a commit matching *sha_id* on *branch_name* appears,
    raising TimeoutError once *end* (epoch seconds) passes."""
    builds = Builds(lgr, repo=repo_name)
    lgr.info('Waiting for commit with sha ID {0} and repo {1} and branch {2}'
             ' for: {3} seconds.'
             .format(sha_id, repo_name, branch_name, int(end-time.time())))
    while time.time() < end:
        _, commits = builds.list_builds()
        match = next((c for c in commits
                      if c['sha'] == sha_id and c['branch'] == branch_name),
                     None)
        if match is not None:
            lgr.info('Commit matching sha ID {0} was found'.format(sha_id))
            return match
        lgr.info('Commit with sha ID {0} was not found. Waiting for 10 seconds'
                 .format(sha_id, repo_name))
        time.sleep(10)
    err = 'Failed waiting for commit with sha ID {0} on repo {1} and ' \
          'branch {2}'.format(sha_id, repo_name, branch_name)
    lgr.warning(err)
    raise TimeoutError(err)
def _get_commit_id(repo_name, sha_id, end, branch_name=None):
    """Return the Travis commit id for *sha_id*, waiting until *end*."""
    commit = _wait_for_commit(repo_name, branch_name, sha_id, end)
    if commit:
        return commit['id']
    raise RuntimeError(
        'Failed waiting for commit with sha ID {0} on repo {1} and branch '
        '{2}'.format(sha_id, repo_name, branch_name))
def _get_build_with_id(builds, commit_id):
build_id = None
for build in builds:
if build['commit_id'] == commit_id:
build_id = build['id']
break
return build_id
def get_jobs_status(sha_id, repo_name, branch_name=None, timeout_min=15):
    '''
    Returns a dictionary containing job results for the specified commit and
    repository name
    :param sha_id:
        The unique commit SHA ID.
    :param repo_name:
        The name of the repo the commit was made to.
    :param branch_name:
        The name of the branch the commit was made to.
    :param timeout_min:
        The timeout to wait for a build to reach final state.
        Default is set to 15 minutes.
    :return: a dictionary containing job results for the specified commit
    :raises RuntimeError: when the build does not finish before the timeout.
    '''
    # Absolute deadline shared by every polling helper below.
    end = time.time() + 60 * timeout_min
    commit_id = _get_commit_id(repo_name, sha_id, end, branch_name=branch_name)
    builds = Builds(lgr, repo=repo_name)
    builds_list, commits = builds.list_builds()
    build_id = _get_build_with_id(builds_list, commit_id)
    # We wait for the build to reach final state.
    build = _wait_for_build_state(build_id, end)
    if build:
        lgr.info('response for build with ID {0} is {1}'
                 .format(build_id, build))
        job_ids = build['job_ids']
    else:
        raise RuntimeError('Failed waiting for build process to finish'
                           ' for the duration of {0}'
                           .format(timeout_min))
    jobs_state = dict()
    lgr.info('Getting jobs state for build with ID {0}'.format(build_id))
    for job_id in job_ids:
        job = Job(lgr, job_id=job_id).show_job()
        if 'env' in job['config']:
            jobs_state.update({job['config']['env']: job['state']})
        else:
            # NOTE(review): jobs without an env all share the single 'state'
            # key, so later jobs overwrite earlier ones -- confirm intended.
            jobs_state.update({'state': job['state']})
    return jobs_state
| cloudify-cosmo/yo-ci | yoci/travis/functional_api.py | Python | apache-2.0 | 4,501 |
"""Tests for our `configure` command."""
import unittest
from unittest.mock import patch, mock_open
from docopt import docopt
from io import StringIO
import paci.cli as paci
from paci.commands.configure import Configure
doc = paci.__doc__
class TestConfigure(unittest.TestCase):
    """Behavioral test for the ``paci configure`` command."""
    def test_prints_the_welcome_msg(self):
        """Running configure silently should still print the welcome banner."""
        cli_args = {'configure': True, '--no-choice': False, '--silent': True, '--main-registry': False, '--fallback-registry': False}
        command = Configure(cli_args)
        with patch('sys.stdout', new_callable=StringIO) as captured:
            with patch("paci.helpers.display_helper.std_input", return_value='') as fake_input:
                command.run()
            self.assertIn('Lets configure a new settings.yml for paci!', captured.getvalue())
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| tradebyte/paci | tests/commands/test_configure.py | Python | mit | 798 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-30 19:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_light_enums.db
import magic_cards.models
class Migration(migrations.Migration):
    """Auto-generated initial schema for the magic_cards app.
    Creates Artist, Card, CardSubtype, CardSupertype, CardType, Printing and
    Set, then wires up the foreign keys and many-to-many relations. Generated
    by Django 1.11.3 -- do not hand-edit the field definitions.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=127, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Card',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                ('mana_cost', models.CharField(blank=True, max_length=63)),
                ('text', models.TextField(blank=True)),
                ('power', models.CharField(blank=True, max_length=7)),
                ('toughness', models.CharField(blank=True, max_length=7)),
            ],
            bases=(magic_cards.models.NameMixin, models.Model),
        ),
        migrations.CreateModel(
            name='CardSubtype',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, unique=True)),
            ],
            bases=(magic_cards.models.NameMixin, models.Model),
        ),
        migrations.CreateModel(
            name='CardSupertype',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, unique=True)),
            ],
            bases=(magic_cards.models.NameMixin, models.Model),
        ),
        migrations.CreateModel(
            name='CardType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, unique=True)),
            ],
            bases=(magic_cards.models.NameMixin, models.Model),
        ),
        migrations.CreateModel(
            name='Printing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rarity', django_light_enums.db.EnumField(choices=[(40, b'COMMON'), (10, b'MYTHIC'), (50, b'SPECIAL'), (20, b'RARE'), (60, b'BASIC_LAND'), (30, b'UNCOMMON')], default=10, enum_values=[40, 10, 50, 20, 60, 30])),
                ('flavor_text', models.TextField(blank=True)),
                ('number', models.CharField(blank=True, max_length=7)),
                ('multiverse_id', models.PositiveIntegerField(blank=True, null=True)),
                ('artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='printings', to='magic_cards.Artist')),
                ('card', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='printings', to='magic_cards.Card')),
            ],
        ),
        migrations.CreateModel(
            name='Set',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=63, unique=True)),
                ('code', models.CharField(max_length=8, unique=True)),
            ],
            bases=(magic_cards.models.NameMixin, models.Model),
        ),
        migrations.AddField(
            model_name='printing',
            name='set',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='printings', to='magic_cards.Set'),
        ),
        migrations.AddField(
            model_name='card',
            name='subtypes',
            field=models.ManyToManyField(to='magic_cards.CardSubtype'),
        ),
        migrations.AddField(
            model_name='card',
            name='supertypes',
            field=models.ManyToManyField(to='magic_cards.CardSupertype'),
        ),
        migrations.AddField(
            model_name='card',
            name='types',
            field=models.ManyToManyField(to='magic_cards.CardType'),
        ),
    ]
| pbaranay/django-magic-cards | src/magic_cards/migrations/0001_initial.py | Python | mit | 4,499 |
'''
Created on Aug 1, 2013
@author: Olga Botvinnik
'''
import unittest
from gscripts.qtools import Submitter
import subprocess
from subprocess import PIPE
import os
import shutil
import tests
# Resolve the machine's hostname once at import time.
# NOTE(review): the command is spelled 'HOSTNAME'; on most Linux systems the
# binary is lowercase 'hostname' -- confirm this resolves outside the
# original environment.
HOSTNAME = subprocess.Popen('HOSTNAME', stdout=subprocess.PIPE).communicate()[
    0].strip()
# Global variable to test if we're on a server, e.g. TSCC or oolite (
# "compute" means one of the oolite compute nodes)
ON_SERVER = set([HOSTNAME]) & set(['tscc', 'oolite', 'compute'])
class Test(unittest.TestCase):
    """Tests that qtools.Submitter renders the expected PBS/SGE scripts."""
    # Trivial commands any cluster node can run.
    commands = ['date', 'echo testing']
    out_dir = 'test_output'
    def setUp(self):
        """Create a scratch directory for generated submission scripts."""
        os.mkdir(self.out_dir)
    def tearDown(self):
        """Remove the scratch directory and everything written into it."""
        shutil.rmtree(self.out_dir)
    def test_pbs(self):
        """Test PBS queue (TSCC)
        """
        job_name = 'test_qtools_submitter_pbs'
        submit_sh = '{}/{}.sh'.format(self.out_dir, job_name)
        sub = Submitter(queue_type='PBS', sh_filename=submit_sh,
                        commands=self.commands,
                        job_name=job_name, nodes=1, ppn=1,
                        queue='home-yeo', walltime='0:01:00'
                        )
        # submit=False only writes the script; nothing is queued here.
        job_id = sub.job(submit=False)
        true_result_string = '''#!/bin/bash
#PBS -N test_qtools_submitter_pbs
#PBS -o {0}/test_qtools_submitter_pbs.sh.out
#PBS -e {0}/test_qtools_submitter_pbs.sh.err
#PBS -V
#PBS -l walltime=0:01:00
#PBS -l nodes=1:ppn=1
#PBS -A yeo-group
#PBS -q home-yeo
# Go to the directory from which the script was called
cd $PBS_O_WORKDIR
date
echo testing
'''.format(self.out_dir)
        true_result = true_result_string.split('\n')
        # with open(submit_sh) as f:
        #     for x in f.readlines():
        #         print x,
        # Whitespace-insensitive line-by-line comparison.
        # NOTE(review): open(submit_sh) is never closed explicitly.
        for true, test in zip(true_result, open(submit_sh)):
            self.assertEqual(true.strip().split(), test.strip().split())
        # Make sure the job ID is a single (potentially multi-digit) integer
        # But only do this if we're on TSCC or oolite
        if ON_SERVER:
            self.assertRegexpMatches(job_id, '^\d+$')
            subprocess.Popen(["qdel", job_id],
                             stdout=PIPE)
    def test_sge(self):
        """Test SGE queue (oolite)
        """
        job_name = 'test_qtools_submitter_sge'
        submit_sh = '{}/{}.sh'.format(self.out_dir, job_name)
        sub = Submitter(queue_type='SGE', sh_filename=submit_sh,
                        commands=self.commands,
                        job_name=job_name, nodes=1, ppn=1,
                        queue='home-yeo', walltime='0:01:00'
                        )
        # submit=False only writes the script; nothing is queued here.
        job_id = sub.job(submit=False)
        true_result_string = '''#!/bin/bash
#$ -N test_qtools_submitter_sge
#$ -o {0}/test_qtools_submitter_sge.sh.out
#$ -e {0}/test_qtools_submitter_sge.sh.err
#$ -V
#$ -S /bin/bash
#$ -cwd
#$ -l bigmem
#$ -l h_vmem=16G
date
echo testing
'''.format(self.out_dir)
        true_result = true_result_string.split('\n')
        # with open(submit_sh) as f:
        #     for x in f.readlines():
        #         print x,
        # Whitespace-insensitive line-by-line comparison.
        for true, test in zip(true_result, open(submit_sh)):
            self.assertEqual(true.strip().split(), test.strip().split())
        # Make sure the job ID is a single (potentially multi-digit) integer
        # But only do this if we're on TSCC or oolite
        if ON_SERVER:
            self.assertRegexpMatches(job_id, '^\d+$')
            subprocess.Popen(["qdel", job_id],
                             stdout=PIPE)
    # def test_wait_for_pbs(self):
    #     commands = ['date', 'echo testing PBS']
    #     job_name = 'test_qtools_submitter_wait_for_pbs'
    #     submit_sh = '%s.sh' % (job_name)
    #     sub = Submitter(queue_type='PBS', sh_filename= submit_sh,
    #                     commands=commands,
    #                     job_name=job_name, wait_for=['11111'])
    #     job_id = sub.write_sh(submit=False, nodes=1, ppn=16,
    #                           queue='home-yeo', walltime='0:01:00')
    #     true_result_string = '''#!/bin/bash
    # #PBS -N test_qtools_submitter_wait_for_pbs
    # #PBS -o test_qtools_submitter_wait_for_pbs.sh.out
    # #PBS -e test_qtools_submitter_wait_for_pbs.sh.err
    # #PBS -V
    # #PBS -l walltime=0:01:00
    # #PBS -l nodes=1:ppn=16
    # #PBS -A yeo-group
    # #PBS -q home-yeo
    # #PBS -W depend=afterok:11111
    #
    # # Go to the directory from which the script was called
    # cd $PBS_O_WORKDIR
    # date
    # echo testing PBS
    # '''
    #     true_result = true_result_string.split('\n')
    #
    #     # with open(submit_sh) as f:
    #     #     for x in f.readlines():
    #     #         print x,
    #
    #     for true, test in zip(true_result, open(submit_sh)):
    #         self.assertEqual(true.strip().split(), test.strip().split())
    #
    #     # Make sure the job ID is a single (potentially multi-digit) integer
    #     if ON_SERVER:
    #         self.assertRegexpMatches(job_id, '^\d+$')
    #         subprocess.Popen(["qdel", job_id], stdout=PIPE)
    #
    # def test_wait_for_array_pbs(self):
    #     commands = ['date', 'echo testing PBS']
    #     job_name = 'test_qtools_submitter_wait_for_pbs'
    #     submit_sh = '%s.sh' % (job_name)
    #     sub = Submitter(queue_type='PBS', sh_filename= submit_sh,
    #                     commands=commands,
    #                     job_name=job_name, wait_for_array=['11111'])
    #     job_id = sub.write_sh(submit=False, nodes=1, ppn=16,
    #                           queue='home-yeo', walltime='0:01:00')
    #     true_result_string = '''#!/bin/bash
    # #PBS -N test_qtools_submitter_wait_for_pbs
    # #PBS -o test_qtools_submitter_wait_for_pbs.sh.out
    # #PBS -e test_qtools_submitter_wait_for_pbs.sh.err
    # #PBS -V
    # #PBS -l walltime=0:01:00
    # #PBS -l nodes=1:ppn=16
    # #PBS -A yeo-group
    # #PBS -q home-yeo
    # #PBS -W depend=afterokarray:11111
    #
    # # Go to the directory from which the script was called
    # cd $PBS_O_WORKDIR
    # date
    # echo testing PBS
    # '''
    #     true_result = true_result_string.split('\n')
    #
    #     # with open(submit_sh) as f:
    #     #     for x in f.readlines():
    #     #         print x,
    #
    #     for true, test in zip(true_result, open(submit_sh)):
    #         self.assertEqual(true.strip().split(), test.strip().split())
    #
    #     # Make sure the job ID is a single (potentially multi-digit) integer
    #     if ON_SERVER:
    #         self.assertRegexpMatches(job_id, '^\d+$')
    #         subprocess.Popen(["qdel", job_id], stdout=PIPE)
if __name__ == "__main__":
    # Run the suite directly; uncomment to run a single test by name.
    # import sys;sys.argv = ['', 'Test.test_main']
    unittest.main()
from random import shuffle
from pybrain.supervised.trainers.backprop import BackpropTrainer
class PreventOverTrainer(BackpropTrainer):
    """Backprop trainer that applies the weight update only while the summed
    epoch error stays above a fixed threshold, leaving the network untouched
    once the error is small enough."""
    def train(self):
        """Run one shuffled epoch and return the ponderation-weighted error."""
        assert len(self.ds) > 0, "Dataset cannot be empty."
        self.module.resetDerivatives()
        total_error = 0
        total_ponderation = 0.
        # Visit the sequences in a fresh random order every epoch.
        sequences = list(self.ds._provideSequences())
        shuffle(sequences)
        for sequence in sequences:
            seq_error, seq_ponderation = self._calcDerivs(sequence)
            total_error += seq_error
            total_ponderation += seq_ponderation
        #print("Total error:", total_error)
        if total_error > 0.1:
            # Only step the parameters while the error is still large.
            self.module._setParameters(self.descent(self.module.derivs))
            self.epoch += 1
        self.totalepochs += 1
        return total_error / total_ponderation
| gnrhxni/CS542 | prevent_overtraining.py | Python | gpl-3.0 | 819 |
# ST2 uses Python 2.6 and ST3 uses Python 3.3.
import sublime, sublime_plugin, re, os, threading, sys, time
PYTHON_VERSION = sys.version_info
# Load Sublime Text's default build-system module (Default/exec.py) in a way
# that works on both interpreter generations.
if PYTHON_VERSION[0] == 2:
    import imp
    root, module = os.path.split(os.getcwd())
    # Build system
    buildPackage = os.path.join(root, "Default", "exec.py")
    # ST2 has no package path for Default, so load it straight from disk.
    imp.load_source("BUILD_SYSTEM", buildPackage)
    del buildPackage
    import BUILD_SYSTEM
elif PYTHON_VERSION[0] >= 3:
    import importlib
    BUILD_SYSTEM = importlib.import_module("Default.exec")
# ST3's API is ready to be used.
#def plugin_loaded():
# global USER_SETTINGS
# USER_SETTINGS = sublime.load_settings('SublimePapyrus.sublime-settings')
def GetSettings():
    """Load the plugin settings, reporting failure in the status bar."""
    settings = sublime.load_settings('SublimePapyrus.sublime-settings')
    if not settings:
        ShowMessage("Could not load settings...")
        return None
    return settings
def ShowMessage(message):
    """Show *message* in the status bar, prefixed with the plugin name."""
    formatted = "SublimePapyrus - %s" % message
    sublime.status_message(formatted)
def SetStatus(view, key, value):
    """Show *value* in *view*'s status bar under the given status key."""
    view.set_status(key, value)
def ClearStatus(view, key):
    """Remove the status bar entry previously set under *key*."""
    view.erase_status(key)
# Region key and color scope used for linter error highlights.
ERROR_HIGHLIGHT_KEY = "sublime_papyrus_error"
ERROR_HIGHLIGHT_SCOPE = "invalid"
def ClearHighlights(view, key):
    """Drop every highlight region stored under *key* in *view*."""
    view.erase_regions(key)
def ClearLinterHighlights(view):
    """Remove the linter's error highlights from *view*."""
    ClearHighlights(view, ERROR_HIGHLIGHT_KEY)
def HighlightLinter(view, line, column = None, center = True):
    """Highlight a linter error at *line* (a single word when *column* is given)."""
    Highlight(view, ERROR_HIGHLIGHT_KEY, ERROR_HIGHLIGHT_SCOPE, line, column, center)
def Highlight(view, key, scope, line, column=None, center=True):
    """Append a highlight for *line* (the word at *column* when given) to the
    regions stored under *key* in *view*, optionally centering the first one."""
    if not (view and line):
        return
    # Extend whatever regions are already stored under this key.
    regions = view.get_regions(key)
    if column:
        anchor = view.text_point(line - 1, column)
        regions.append(view.word(sublime.Region(anchor)))
    else:
        anchor = view.text_point(line - 1, 0)
        regions.append(view.line(sublime.Region(anchor)))
    if regions:
        view.add_regions(key, regions, scope)
        settings = GetSettings()
        if settings and center and settings.get("center_highlighted_line", True):
            view.show_at_center(regions[0])
def GetSourcePaths(view):
    """Return the configured import paths for the module of *view*'s syntax,
    with the file's own folder prepended; None (plus a status message) when
    any piece of configuration is missing."""
    if not view:
        return None
    match = re.search("source\.papyrus\.(\w+).*", view.scope_name(0), re.IGNORECASE)
    if not match:
        ShowMessage("SublimePapyrus: Unsupported syntax definition.")
        return None
    module = match.group(1)
    settings = GetSettings()
    if not settings:
        # GetSettings already reported the failure.
        return None
    modules = settings.get("modules", None)
    if not modules:
        ShowMessage("Could not find settings for any modules.")
        return None
    moduleSettings = modules.get(module, None)
    if not moduleSettings:
        ShowMessage("Could not find settings for %s." % module.capitalize())
        return None
    paths = moduleSettings.get("import", None)
    if not paths:
        ShowMessage("Could not find import paths for %s." % module.capitalize())
        return None
    fullPath = view.file_name()
    if fullPath:
        # Search the file's own folder first.
        folderPath, fileName = os.path.split(fullPath)
        paths.insert(0, folderPath)
    return paths
class SublimePapyrusFileSelectionPanelCommand(sublime_plugin.WindowCommand):
    """Quick panel listing candidate script paths; opens the chosen one."""
    def run(self, **args):
        paths = args["items"]
        if not paths:
            return
        self.items = paths
        settings = GetSettings()
        display = paths
        if settings and settings.get("open_script_split_paths", True):
            # Present each entry as [file name, containing folder].
            display = [list(os.path.split(path)[::-1]) for path in self.items]
        if PYTHON_VERSION[0] == 2:
            self.window.show_quick_panel(display, self.on_select, 0, -1)
        elif PYTHON_VERSION[0] >= 3:
            self.window.show_quick_panel(display, self.on_select, 0, -1, None)
    def on_select(self, index):
        """Open the selected path; a negative index means the panel was cancelled."""
        if index >= 0:
            self.window.open_file(self.items[index])
# Base class that is used in the framework for showing a list of valid arguments and then inserting them.
# Libraries that need this functionality should import at least "sublime", "sublime_plugin", "sys", and this module.
# ST2 requires using the "imp" module to load this module first via the "load_source" function. ST3 can simply use "from SublimePapyrus import SublimePapyrus".
# Classes implementing this functionality need to inherit the "PapyrusShowSuggestionsCommand" class and override the "get_items" method.
# "get_items" should return a dictionary where the keys are the descriptions shown to the user and the values are what is inserted into the buffer.
class SublimePapyrusShowSuggestionsCommand(sublime_plugin.TextCommand):
    """Framework base class: show a quick panel of valid argument values and
    insert the chosen one over the current selection.
    Subclasses override get_items() and return a dict mapping the description
    shown in the panel to the value inserted into the buffer.
    """
    def run(self, edit, **args):
        selections = self.view.sel()
        if selections != None and len(selections) == 1:
            region = selections[0]
            # Remember where the replacement must be inserted later.
            self.argument = region
            items = self.get_items()
            if items != None:
                # Sort descriptions and values together so the panel index
                # maps back onto the right value.
                sortedKeysAndValues = sorted(zip(list(items.keys()), list(items.values())))
                sortedKeys = [[key, str(value)] for (key, value) in sortedKeysAndValues]
                sortedValues = [value for (key, value) in sortedKeysAndValues]
                self.items = sortedKeys
                self.values = sortedValues
                if PYTHON_VERSION[0] == 2:
                    self.view.window().show_quick_panel(self.items, self.on_select, 0, -1)
                else:
                    self.view.window().show_quick_panel(self.items, self.on_select, 0, -1, None)
    def get_items(self, **args):
        """Override point: return a {description: value} dict, or None."""
        return None
    def on_select(self, index):
        """Insert the value picked in the quick panel (index < 0 = cancelled)."""
        if index >= 0:
            value = str(self.values[index])
            if value.isdigit() or value != "":
                args = {"region_start": self.argument.a, "region_end": self.argument.b, "replacement": value}
            else:
                # Empty value: fall back to the description itself.
                args = {"region_start": self.argument.a, "region_end": self.argument.b, "replacement": str(self.items[index][0])}
            self.view.run_command("sublime_papyrus_insert_suggestion", args)
# Inserts the value chosen in the class that inherits "PapyrusShowSuggestionsCommand".
class SublimePapyrusInsertSuggestionCommand(sublime_plugin.TextCommand):
    """Replace a buffer region with the chosen suggestion; non-numeric values
    are inserted wrapped in double quotes."""
    def run(self, edit, **args):
        start = args["region_start"]
        target = sublime.Region(start, args["region_end"])
        self.view.erase(edit, target)
        replacement = args["replacement"]
        if replacement.isdigit():
            self.view.insert(edit, start, replacement)
        else:
            self.view.insert(edit, start, "\"" + replacement + "\"")
class SublimePapyrusClearErrorHighlightsCommand(sublime_plugin.TextCommand):
    """Text command that wipes the linter's error highlights from the view."""
    def run(self, edit, **args):
        if self.view:
            ClearLinterHighlights(self.view)
# Open a script based on input
class SublimePapyrusOpenScriptCommand(sublime_plugin.WindowCommand):
    """Prompt for a (partial) script name and open the matching source file,
    searching the configured import paths of the active module."""
    def run(self):
        text = ""
        self.view = self.window.active_view()
        if self.view:
            # Seed the prompt with the current selection, if any.
            text = self.view.substr(self.view.sel()[0])
        self.window.show_input_panel("Open script:", text, self.on_done, None, None)
    def on_done(self, text):
        """Start the search; under ST3 run it on a thread to keep the UI alive."""
        if not text:
            return
        if PYTHON_VERSION[0] == 2:
            self.get_matching_files(text)
        elif PYTHON_VERSION[0] >= 3:
            thread = threading.Thread(target=self.get_matching_files, args=(text,))
            thread.start()
    def get_matching_files(self, text, paths = None):
        """Collect files whose names contain *text* (case-insensitive; "*"
        matches everything). When no source paths can be derived from the
        active view, ask the user to pick a module and retry."""
        if not paths:
            paths = GetSourcePaths(self.view)
        if paths:
            ShowMessage("Looking for matches...")
            candidates = []
            if text == "*":
                text = ""
            text = text.lower()
            for path in paths:
                for root, dirs, files in os.walk(path):
                    for file in files:
                        if text in file.lower():
                            fullPath = os.path.join(root, file)
                            if not fullPath in candidates:
                                candidates.append(fullPath)
                    # NOTE(review): this break stops os.walk after its first
                    # yield, so only the top level of each path is searched --
                    # confirm the non-recursive search is intentional.
                    break
            i = len(candidates)
            if i == 1:
                ShowMessage("Found 1 match.")
                self.window.open_file(candidates[0])
            elif i > 1:
                ShowMessage("Found %d matches." % i)
                self.window.run_command("sublime_papyrus_file_selection_panel", {"items": candidates})
            else:
                ShowMessage("Found no matches.")
        else:
            settings = GetSettings()
            modules = settings.get("modules", None)
            if modules:
                moduleTitles = []
                self.modulePaths = []
                for ident, moduleSettings in modules.items():
                    paths = moduleSettings.get("import", None)
                    if paths:
                        self.modulePaths.append(paths)
                        moduleTitles.append(moduleSettings.get("title", ident.capitalize()))
                if moduleTitles:
                    self.text = text
                    self.window.show_quick_panel(moduleTitles, self.module_paths)
    def module_paths(self, index):
        """Re-run the search against the import paths of the chosen module."""
        if index >= 0 and index < len(self.modulePaths):
            self.get_matching_files(self.text, self.modulePaths[index])
        else:
            return
# Build system
class SublimePapyrusCompileScriptCommand(sublime_plugin.WindowCommand):
	# Validates the build settings of the relevant module (compiler path,
	# flags file, output folder, import folders, extra arguments), assembles
	# the compiler command line and hands it to the "exec" build command.
	def run(self, **args):
		file = args["cmd"]
		filePath, fileName = os.path.split(file)
		regex = args["file_regex"]
		module = args["module"]
		# When truthy, a whole folder is compiled instead of a single script.
		batch = args.get("batch", False)
		settings = GetSettings()
		if settings:
			modules = settings.get("modules", None)
			if modules:
				moduleSettings = modules.get(module, None)
				if moduleSettings:
					compiler = moduleSettings.get("compiler", None)
					if not compiler or compiler == "":
						return ShowMessage("The compiler path setting is undefined or invalid.")
					flags = moduleSettings.get("flags", None)
					if not flags or flags == "":
						return ShowMessage("The flags name setting is undefined or invalid.")
					output = moduleSettings.get("output", "")
					if not output or output == "":
						# Default the output folder to the parent of the script's folder.
						output, _ = os.path.split(filePath)
					if output[-2:] == ":\\":
						# Bare drive roots like "C:\" need the trailing backslash doubled
						# so it is not swallowed when quoted on the command line.
						output = output + "\\"
					imports = moduleSettings.get("import", None)
					if imports:
						# Imports must be a list of non-empty strings (basestring on
						# Python 2, str on Python 3).
						if (PYTHON_VERSION[0] == 2 and isinstance(imports, list) and all(isinstance(k, basestring) for k in imports) and all(k != "" for k in imports)) or (PYTHON_VERSION[0] >= 3 and isinstance(imports, list) and all(isinstance(k, str) for k in imports) and all(k != "" for k in imports)):
							if not batch:
								# Make sure the script's own folder is searched first.
								if not filePath in imports:
									imports.insert(0, filePath)
							else:
								# Batch-compiling one of the import folders may rebuild base
								# game scripts; warn the user first (if enabled).
								t = filePath.lower()
								if not all(t != k.lower() for k in imports) and settings.get("batch_compilation_warning", True) and not sublime.ok_cancel_dialog("Are you sure you want to batch compile all script sources in \"%s\"?\n\nThis folder is one of the import folders and may contain script sources that are a part of the base game. Compiling said script sources could lead to unintended behavior if they have been modified." % filePath):
									return
							for path in imports:
								if not os.path.isdir(path):
									return ShowMessage("The import path '%s' does not exist on the filesystem." % path)
							imports = ";".join(imports)
						else:
							return ShowMessage("The import path(s) setting has to be a list of strings.")
					else:
						return ShowMessage("The import path(s) setting is undefined.")
					arguments = moduleSettings.get("arguments", None)
					if arguments:
						# Normalize user-configured arguments so each starts with "-".
						if isinstance(arguments, list) and all(isinstance(k, str) for k in arguments):
							temp = []
							for k in arguments:
								if k[:1] == "-":
									temp.append(k)
								else:
									temp.append("-%s" % k)
							arguments = temp
						else:
							return ShowMessage("The arguments setting has to be a list of strings.")
					buildArguments = args.get("arguments", None)
					if buildArguments:
						# Merge arguments coming from the build system, avoiding duplicates.
						if isinstance(buildArguments, list) and all(isinstance(k, str) for k in buildArguments):
							if arguments and isinstance(arguments, list):
								for k in buildArguments:
									if k[:1] != "-":
										k = "-%s" % k
									if k not in arguments:
										arguments.append(k)
							elif not arguments:
								arguments = []
								for k in buildArguments:
									if k[:1] == "-":
										arguments.append(k)
									else:
										arguments.append("-%s" % k)
						else:
							return ShowMessage("The build system's arguments setting has to be a list of strings.")
					if arguments and isinstance(arguments, list):
						arguments = " ".join(arguments)
					if not arguments:
						arguments = ""
					if not batch:
						args = {"cmd": "\"%s\" \"%s\" -i=\"%s\" -o=\"%s\" -f=\"%s\" %s" % (compiler, fileName, imports, output, flags, arguments), "file_regex": regex}
					else:
						if filePath[-1:] == "\\":
							filePath = filePath[:-1]
						# NOTE(review): *batch* itself is interpolated into the command
						# line here — presumably a compiler switch such as "-all";
						# confirm against the build system definitions.
						args = {"cmd": "\"%s\" \"%s\" -i=\"%s\" -o=\"%s\" -f=\"%s\" %s %s" % (compiler, filePath, imports, output, flags, batch, arguments), "file_regex": regex}
					self.window.run_command("exec", args)
# Make completions
def MakeFunctionCompletion(stat, sem, calling = True, script = "", precededByKeyword = False, parameters = True):
	"""Build a Sublime Text completion tuple (trigger, snippet) for a function.

	stat -- statement whose .data describes the function (name, identifier,
	        return type and parameters).
	sem -- semantic analyzer; used to render default parameter values via
	       sem.GetLiteral(expression, True).
	calling -- True produces a call-site snippet, False a definition skeleton
	           ending in EndFunction.
	script -- optional originating script name shown in the description.
	precededByKeyword -- for definitions: True means the "Function" keyword has
	                     already been typed, so it is omitted from the snippet.
	parameters -- for call sites: False collapses the parameter list into a
	              single generic "${1}" placeholder.
	"""
	tabTrigger = stat.data.name.lower()
	if script:
		script = " (%s)" % script
	# Description shows the return type ("Int", "Int[]" or nothing) and origin.
	if stat.data.type:
		if stat.data.array:
			description = "%s[] func.%s" % (stat.data.typeIdentifier, script)
		else:
			description = "%s func.%s" % (stat.data.typeIdentifier, script)
	else:
		description = "func.%s" % script
	if calling:
		if stat.data.parameters:
			if parameters:
				content = "%s(%s)" % (stat.data.identifier, _FormatCompletionParameters(stat, sem))
			else:
				content = "%s(${1})" % stat.data.identifier
		else:
			content = "%s()" % stat.data.identifier
		return (tabTrigger + "\t" + description.lower(), content,)
	else:
		content = _FormatCompletionParameters(stat, sem)
		if precededByKeyword:
			content = "%s(%s)\n\t${0}\nEndFunction" % (stat.data.identifier, content)
		else:
			# Prefix the return type, if any, to the "Function" keyword.
			typ = ""
			if stat.data.type:
				if stat.data.array:
					typ = "%s[] " % stat.data.typeIdentifier
				else:
					typ = "%s " % stat.data.typeIdentifier
			content = "%sFunction %s(%s)\n\t${0}\nEndFunction" % (typ, stat.data.identifier, content)
		return (tabTrigger + "\t" + description.lower(), content,)

def _FormatCompletionParameters(stat, sem):
	"""Render stat's parameter list as numbered snippet fields.

	Produces e.g. "${1:Int a}, ${2:Float[] b = 1.0}". Fix: the
	array-with-default branch previously used param.type where every sibling
	branch (and MakeEventCompletion) uses param.typeIdentifier.
	"""
	if not stat.data.parameters:
		return ""
	fields = []
	for i, param in enumerate(stat.data.parameters, 1):
		suffix = "[]" if param.array else ""
		if param.expression:
			fields.append("${%d:%s%s %s = %s}" % (i, param.typeIdentifier, suffix, param.identifier, sem.GetLiteral(param.expression, True)))
		else:
			fields.append("${%d:%s%s %s}" % (i, param.typeIdentifier, suffix, param.identifier))
	return ", ".join(fields)
def MakeEventCompletion(stat, sem, calling = True, script = "", precededByKeyword = False, parameters = True):
	"""Build a Sublime Text completion tuple (trigger, snippet) for an event.

	Mirrors MakeFunctionCompletion, except that events have no return type and
	a definition skeleton is wrapped in Event/EndEvent.
	"""
	trigger = stat.data.name.lower()
	origin = " (%s)" % script if script else ""
	description = ("event%s" % origin).lower()
	def render_parameters():
		# One numbered snippet field per parameter, e.g. "${1:Int a = 0}".
		rendered = []
		index = 1
		for par in (stat.data.parameters or []):
			brackets = "[]" if par.array else ""
			if par.expression:
				rendered.append("${%d:%s%s %s = %s}" % (index, par.typeIdentifier, brackets, par.identifier, sem.GetLiteral(par.expression, True)))
			else:
				rendered.append("${%d:%s%s %s}" % (index, par.typeIdentifier, brackets, par.identifier))
			index += 1
		return ", ".join(rendered)
	if calling:
		if not stat.data.parameters:
			snippet = "%s()" % stat.data.identifier
		elif not parameters:
			snippet = "%s(${1})" % stat.data.identifier
		else:
			snippet = "%s(%s)" % (stat.data.identifier, render_parameters())
		return (trigger + "\t" + description, snippet,)
	body = render_parameters()
	if precededByKeyword:
		snippet = "%s(%s)\n\t${0}\nEndEvent" % (stat.data.identifier, body)
	else:
		snippet = "Event %s(%s)\n\t${0}\nEndEvent" % (stat.data.identifier, body)
	return (trigger + "\t" + description, snippet,)
def MakePropertyCompletion(stat, script = ""):
	"""Completion tuple (trigger, replacement) for a script property."""
	origin = " (%s)" % script if script else ""
	arraySuffix = "[]" if stat.data.array else ""
	description = "%s%s prop.%s" % (stat.data.typeIdentifier, arraySuffix, origin)
	return (stat.data.name.lower() + "\t" + description.lower(), stat.data.identifier,)
def MakeVariableCompletion(stat):
	"""Completion tuple (trigger, replacement) for a variable."""
	suffix = "[]" if stat.data.array else ""
	description = "%s%s var." % (stat.data.typeIdentifier, suffix)
	return (stat.data.name.lower() + "\t" + description.lower(), stat.data.identifier,)
def MakeParameterCompletion(stat):
	"""Completion tuple (trigger, replacement) for a function/event parameter."""
	typeName = stat.data.typeIdentifier
	description = ("%s[] param." if stat.data.array else "%s param.") % typeName
	return (stat.data.name.lower() + "\t" + description.lower(), stat.data.identifier,)
# Checks the build result for errors and, depending on the settings, highlights lines that caused errors and/or hides the build results when there are no errors.
class ExecCommand(BUILD_SYSTEM.ExecCommand):
	# Post-build hook: highlights lines that caused compiler errors in
	# Papyrus views and, depending on settings, hides the build panel when
	# the build succeeded.
	# NOTE(review): finish() does not call the base class's finish(proc) —
	# confirm the default build-result handling is meant to be suppressed.
	def finish(self, proc):
		view = sublime.active_window().active_view()
		if view:
			if "source.papyrus" in view.scope_name(0):
				# Clear highlights from the previous build before re-scanning.
				view.erase_regions(ERROR_HIGHLIGHT_KEY)
				userSettings = GetSettings()
				if userSettings:
					if userSettings.get('highlight_build_errors', True):
						output = self.output_view.substr(sublime.Region(0, self.output_view.size()))
						if output:
							pattern = self.output_view.settings().get("result_file_regex")
							if pattern:
								errors = self.GetErrors(output, pattern)
								if errors:
									regions = self.GetRegions(view, errors)
									if regions:
										view.add_regions(ERROR_HIGHLIGHT_KEY, regions, ERROR_HIGHLIGHT_SCOPE)
										if userSettings.get("center_highlighted_line", True):
											view.show_at_center(regions[0])
								elif userSettings.get('hide_successful_build_results', False):
									# No errors: optionally hide the build output panel.
									self.window.run_command("hide_panel", {"panel": "output.exec"})
	def GetErrors(self, output, pattern):
		# Returns a list of regex match lists (one per output line that matched
		# the build system's error pattern), or None when nothing matched.
		lines = output.rstrip().split('\n')
		matches = []
		regex = re.compile(pattern)
		for line in lines:
			match = regex.findall(line)
			if len(match) > 0:
				matches.append(match)
		if len(matches) > 0:
			return matches
		else:
			return None
	def GetRegions(self, view, errors):
		# Maps matched errors to full-line regions in *view*. The 1-based line
		# number is taken from the second captured group of each first match.
		regions = []
		for error in errors:
			region = view.line(sublime.Region(view.text_point(int(error[0][1]) - 1, 0)))
			regions.append(region)
			del region
		if len(regions) > 0:
			return regions
		else:
			return None
# -*- coding: utf-8 -*-
from flask import abort
from purchasing.public.models import AppStatus
from purchasing_test.test_base import BaseTestCase
class TestRenderErrors(BaseTestCase):
    """Checks that each supported HTTP error code renders its dedicated template."""
    render_templates = True

    def build_rule(self, code):
        # Return a view function that immediately aborts with *code*.
        def rule():
            abort(code)
        return rule

    def setUp(self):
        super(TestRenderErrors, self).setUp()
        AppStatus.create()
        self.codes = [401, 403, 404, 413, 500]
        # Register one throwaway route per status code so a GET triggers it.
        for code in self.codes:
            endpoint = str(code)
            self.client.application.add_url_rule('/' + endpoint, endpoint, self.build_rule(code))

    def test_render_errors(self):
        for code in self.codes:
            self.client.get('/{}'.format(code))
            self.assert_template_used('errors/{}.html'.format(code))
| CityofPittsburgh/pittsburgh-purchasing-suite | purchasing_test/integration/other/test_render_errors.py | Python | bsd-3-clause | 776 |
__author__ = 'gjones'
import time
import sys
import numpy as np
from kid_readout.roach import heterodyne
from kid_readout.utils import data_file, sweeps
from kid_readout.equipment import hittite_controller, lockin_controller
# --- Instrument setup (Python 2 script; runs against lab hardware) ---
hittite = hittite_controller.hittiteController(addr='192.168.0.200')
lockin = lockin_controller.lockinController()
print lockin.get_idn()
ri = heterodyne.RoachHeterodyne(adc_valon='/dev/ttyUSB0')
ri.iq_delay = 0
ri.set_lo(1410.0)
# Earlier resonance lists / LO settings kept for reference:
#group_1_lo = 1020.0
#group_2_lo = 1410.0
#all_f0s = np.load('/data/readout/resonances/2016-01-13-jpl-2015-10-park-dark-32-resonances-split-at-1300.npy') -0.5
#group_1_f0 = all_f0s[all_f0s < 1300]
#group_2_f0 = all_f0s[all_f0s > 1300]
"""
all_f0s = np.load('/data/readout/resonances/2016-02-12-jpl-park-100nm-32-resonances.npy')
group_1_f0 = all_f0s[all_f0s<1500]
group_2_f0 = all_f0s[all_f0s>1800]
group_1_lo = 1220.0
group_2_lo = 1810.0
"""
# Resonance frequencies (MHz), split into two LO groups at 1300 MHz; the
# lower group is scaled and both are offset by -0.5 MHz.
all_f0s = np.load('/data/readout/resonances/2016-02-29-jpl-park-2015-10-40nm-al-niobium-gp-two-groups.npy')
group_1_f0 = all_f0s[all_f0s<1300]*0.9997 - 0.5
group_2_f0 = all_f0s[all_f0s>1300] - 0.5
group_1_lo = 1030.0
group_2_lo = 1420.0
#responsive_resonances = np.load('/data/readout/resonances/2015-11-26-jpl-nevins-responsive-resonances.npy')
suffix = "cw_noise_test"
# mm-wave source: modulation output and CW frequency (the Hittite synth is
# driven at 1/12 of the source frequency, presumably before a x12 multiplier
# chain — confirm).
mmw_source_modulation_freq = ri.set_modulation_output(rate=7)
mmw_source_frequency = 148e9
hittite.set_freq(mmw_source_frequency/12.0)
mmw_atten_turns = (5.0, 5.0)
#print "modulating at: {}".format(mmw_source_modulation_freq),
# Number of channels read out simultaneously per data-taking pass.
atonce = 16
# Main acquisition loop: for each LO group, sweep the resonances, retune to
# the measured minima of |S21|, then record timestreams with the mm-wave
# source unmodulated ("on") and modulated, across a range of source powers.
for group_num,(lo,f0s) in enumerate(zip([group_1_lo,group_2_lo],[group_1_f0,group_2_f0])):
    print "group",group_num,"lo",lo,"min f0",f0s.min()
    ri.set_lo(lo)
    nsamp = 2**16
    step = 1
    nstep = 128
    # Quantize tone frequencies to the readout bin grid and build the sweep offsets.
    f0binned = np.round(f0s * nsamp / 512.0) * 512.0 / nsamp
    offset_bins = np.arange(-(nstep + 1), (nstep + 1)) * step
    offsets = offset_bins * 512.0 / nsamp
    measured_freqs = sweeps.prepare_sweep(ri, f0binned, offsets, nsamp=nsamp)
    for hittite_power in np.arange(-3.4,1.1,0.2):
        hittite.set_power(hittite_power)
        # One data file per source power setting.
        df = data_file.DataFile(suffix=suffix)
        df.nc.mmw_atten_turns = mmw_atten_turns
        for atten_index,dac_atten in enumerate([2.]):
            print "at dac atten", dac_atten
            ri.set_dac_atten(dac_atten)
            ri.set_modulation_output('low')
            df.log_hw_state(ri)
            df.log_adc_snap(ri)
            sweep_data = sweeps.do_prepared_sweep(ri, nchan_per_step=atonce, reads_per_step=2)
            df.add_sweep(sweep_data)
            # Retune each readout tone to the frequency of minimum |S21|.
            fmins = []
            for k in range(len(f0s)):
                fr, s21, errors = sweep_data.select_index(k)
                fmins.append(fr[np.abs(s21).argmin()])
            fmins.sort()
            ri.add_tone_freqs(np.array(fmins),overwrite_last=True)
            ri.select_bank(ri.tone_bins.shape[0] - 1)
            # ri.set_tone_freqs(responsive_resonances[:32],nsamp=2**15)
            ri.select_fft_bins(range(len(f0s)))
            ri._sync()
            time.sleep(0.5)
            print "taking data with source on"
            # raw_input("press enter to start")
            ri.set_modulation_output('low')
            df.log_hw_state(ri)
            # Read the channels in interleaved subsets of *atonce*.
            nsets = len(f0s) / atonce
            tsg = None
            for iset in range(nsets):
                selection = range(len(f0s))[iset::nsets]
                ri.select_fft_bins(selection)
                ri._sync()
                time.sleep(0.4)
                t0 = time.time()
                dmod, addr = ri.get_data(256)  # about 30 seconds of data
                # x, y, r, theta = lockin.get_data()
                tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg)
                df.sync()
            print "taking data with source modulated"
            ri.set_modulation_output(7)
            df.log_hw_state(ri)
            nsets = len(f0s) / atonce
            tsg = None
            for iset in range(nsets):
                selection = range(len(f0s))[iset::nsets]
                ri.select_fft_bins(selection)
                ri._sync()
                time.sleep(0.4)
                t0 = time.time()
                dmod, addr = ri.get_data(16)  # about 2 seconds of data
                # The lock-in reading gives the ZBD detector voltage for calibration.
                x, y, r, theta = lockin.get_data()
                tsg = df.add_timestream_data(dmod, ri, t0, tsg=tsg,zbd_voltage=r,mmw_source_freq=mmw_source_frequency)
                df.sync()
        df.close()
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014-2015 Augustin Cisterne-Kaas (ACK Consulting Limited)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest for the "web_polymorphic_field" widget module.
{'name': 'Web Polymorphic Field',
 'version': '0.1',
 'category': 'Web',
 'depends': ['web'],
 'author': 'Augustin Cisterne-Kaas',
 'description': """
Add a new widget named "polymorphic"
The polymorphic field allow to dynamically store an id linked to any model in
Odoo instead of the usual fixed one in the view definition
E.g:
<field name="model" widget="polymorphic" polymorphic="object_id" />
<field name="object_id" />
""",
 # 'license': 'LGPL-3',
 # XML view/asset declarations loaded on install.
 'data': [
 'views/web_polymorphic_field.xml'
 ],
 # Client-side assets implementing the widget.
 'js': [
 'static/src/js/view_form.js'
 ],
 'installable': True,
 'application': False}
| litnimax/addons-yelizariev | web_polymorphic_field/__openerp__.py | Python | lgpl-3.0 | 1,531 |
import json
import urllib.parse
import urllib.request
from os import environ

from flask import Flask, Response, render_template, request
app = Flask(__name__)
# Client addresses treated as "local": for these the lookup falls back to a
# default target instead of using the request IP.
LOCAL = ["127.0.0.1","localhost"]
@app.route('/')
def index():
    # Serve the single-page frontend.
    return render_template('index.html')
@app.route('/lookup/')
def lookup():
    """Geo-IP lookup endpoint.

    Resolves the ``target`` query-string parameter (falling back to the
    requesting client's IP, or ``google.com`` for local clients), strips any
    scheme/path, and proxies the query to the ip-api.com JSON API. Always
    returns an ``application/json`` response; failures are reported in the
    same envelope as ``{"message": ..., "status": "fail"}``.
    """
    # Get target from querystring.
    target = request.args.get("target")
    if not target:
        # Use request ip if target arg not set.
        target = "google.com"
        if len(request.access_route) > 0 and request.access_route[0] not in LOCAL:
            target = request.access_route[0]
    # Remove scheme/path from target if exists
    if "//" in target:
        target = target.split("//")[1]
    if "/" in target:
        target = target.split("/")[0]
    # Make request to ip-api.com json api.
    try:
        # Percent-encode the (untrusted) target so it cannot inject extra
        # path segments or query parameters into the upstream URL.
        requestURL = "http://ip-api.com/json/{}".format(urllib.parse.quote(target, safe=""))
        # A timeout keeps a slow upstream from hanging the worker indefinitely.
        rsp = urllib.request.urlopen(requestURL, timeout=10)
        data = rsp.read().decode(rsp.info().get_param('charset') or 'utf-8')
        return Response(data, mimetype='application/json')
    except Exception as err:
        # Return error in json response.
        return Response(json.dumps({"message": str(err), "status": "fail"}), mimetype='application/json')
if __name__ == '__main__':
    # Development server settings; not intended for production use.
    DEBUG = True
    HOST = "0.0.0.0"
    PORT = 5555
    app.run(HOST, PORT, debug=DEBUG)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.