| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
| zombieJ/incubator-eagle | refs/heads/master | eagle-external/hadoop_jmx_collector/lib/six/test_six.py | 34 |
import operator
import sys
import types
import py
import six
def test_add_doc():
def f():
"""Icky doc"""
pass
six._add_doc(f, """New doc""")
assert f.__doc__ == "New doc"
def test_import_module():
from logging import handlers
m = six._import_module("logging.handlers")
assert m is handlers
def test_integer_types():
assert isinstance(1, six.integer_types)
assert isinstance(-1, six.integer_types)
assert isinstance(six.MAXSIZE + 23, six.integer_types)
assert not isinstance(.1, six.integer_types)
def test_string_types():
assert isinstance("hi", six.string_types)
assert isinstance(six.u("hi"), six.string_types)
assert issubclass(six.text_type, six.string_types)
def test_class_types():
class X:
pass
class Y(object):
pass
assert isinstance(X, six.class_types)
assert isinstance(Y, six.class_types)
assert not isinstance(X(), six.class_types)
def test_text_type():
assert type(six.u("hi")) is six.text_type
def test_binary_type():
assert type(six.b("hi")) is six.binary_type
def test_MAXSIZE():
try:
# This shouldn't raise an overflow error.
six.MAXSIZE.__index__()
except AttributeError:
# Before Python 2.6.
pass
py.test.raises(
(ValueError, OverflowError),
operator.mul, [None], six.MAXSIZE + 1)
def test_lazy():
if six.PY3:
html_name = "html.parser"
else:
html_name = "HTMLParser"
assert html_name not in sys.modules
mod = six.moves.html_parser
assert sys.modules[html_name] is mod
assert "htmlparser" not in six._MovedItems.__dict__
try:
import _tkinter
except ImportError:
have_tkinter = False
else:
have_tkinter = True
have_gdbm = True
try:
import gdbm
except ImportError:
try:
import dbm.gnu
except ImportError:
have_gdbm = False
@py.test.mark.parametrize("item_name",
[item.name for item in six._moved_attributes])
def test_move_items(item_name):
"""Ensure that everything loads correctly."""
try:
item = getattr(six.moves, item_name)
if isinstance(item, types.ModuleType):
__import__("six.moves." + item_name)
except AttributeError:
if item_name == "zip_longest" and sys.version_info < (2, 6):
py.test.skip("zip_longest only available on 2.6+")
except ImportError:
if item_name == "winreg" and not sys.platform.startswith("win"):
py.test.skip("Windows only module")
if item_name.startswith("tkinter"):
if not have_tkinter:
py.test.skip("requires tkinter")
if item_name == "tkinter_ttk" and sys.version_info[:2] <= (2, 6):
py.test.skip("ttk only available on 2.7+")
if item_name.startswith("dbm_gnu") and not have_gdbm:
py.test.skip("requires gdbm")
raise
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_parse_moved_attributes])
def test_move_items_urllib_parse(item_name):
"""Ensure that everything loads correctly."""
if item_name == "ParseResult" and sys.version_info < (2, 5):
py.test.skip("ParseResult is only found on 2.5+")
if item_name in ("parse_qs", "parse_qsl") and sys.version_info < (2, 6):
py.test.skip("parse_qs[l] is new in 2.6")
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.parse)
getattr(six.moves.urllib.parse, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_error_moved_attributes])
def test_move_items_urllib_error(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.error)
getattr(six.moves.urllib.error, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_request_moved_attributes])
def test_move_items_urllib_request(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.request)
getattr(six.moves.urllib.request, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_response_moved_attributes])
def test_move_items_urllib_response(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.response)
getattr(six.moves.urllib.response, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_robotparser_moved_attributes])
def test_move_items_urllib_robotparser(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.robotparser)
getattr(six.moves.urllib.robotparser, item_name)
def test_import_moves_error_1():
from six.moves.urllib.parse import urljoin
from six import moves
# In 1.4.1: AttributeError: 'Module_six_moves_urllib_parse' object has no attribute 'urljoin'
assert moves.urllib.parse.urljoin
def test_import_moves_error_2():
from six import moves
assert moves.urllib.parse.urljoin
# In 1.4.1: ImportError: cannot import name urljoin
from six.moves.urllib.parse import urljoin
def test_import_moves_error_3():
from six.moves.urllib.parse import urljoin
# In 1.4.1: ImportError: cannot import name urljoin
from six.moves.urllib_parse import urljoin
def test_from_imports():
from six.moves.queue import Queue
assert isinstance(Queue, six.class_types)
from six.moves.configparser import ConfigParser
assert isinstance(ConfigParser, six.class_types)
def test_filter():
from six.moves import filter
f = filter(lambda x: x % 2, range(10))
assert six.advance_iterator(f) == 1
def test_filter_false():
from six.moves import filterfalse
f = filterfalse(lambda x: x % 3, range(10))
assert six.advance_iterator(f) == 0
assert six.advance_iterator(f) == 3
assert six.advance_iterator(f) == 6
def test_map():
from six.moves import map
assert six.advance_iterator(map(lambda x: x + 1, range(2))) == 1
def test_zip():
from six.moves import zip
assert six.advance_iterator(zip(range(2), range(2))) == (0, 0)
@py.test.mark.skipif("sys.version_info < (2, 6)")
def test_zip_longest():
from six.moves import zip_longest
it = zip_longest(range(2), range(1))
assert six.advance_iterator(it) == (0, 0)
assert six.advance_iterator(it) == (1, None)
class TestCustomizedMoves:
def teardown_method(self, meth):
try:
del six._MovedItems.spam
except AttributeError:
pass
try:
del six.moves.__dict__["spam"]
except KeyError:
pass
def test_moved_attribute(self):
attr = six.MovedAttribute("spam", "foo", "bar")
if six.PY3:
assert attr.mod == "bar"
else:
assert attr.mod == "foo"
assert attr.attr == "spam"
attr = six.MovedAttribute("spam", "foo", "bar", "lemma")
assert attr.attr == "lemma"
attr = six.MovedAttribute("spam", "foo", "bar", "lemma", "theorm")
if six.PY3:
assert attr.attr == "theorm"
else:
assert attr.attr == "lemma"
def test_moved_module(self):
attr = six.MovedModule("spam", "foo")
if six.PY3:
assert attr.mod == "spam"
else:
assert attr.mod == "foo"
attr = six.MovedModule("spam", "foo", "bar")
if six.PY3:
assert attr.mod == "bar"
else:
assert attr.mod == "foo"
def test_custom_move_module(self):
attr = six.MovedModule("spam", "six", "six")
six.add_move(attr)
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
attr = six.MovedModule("spam", "six", "six")
six.add_move(attr)
from six.moves import spam
assert spam is six
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
def test_custom_move_attribute(self):
attr = six.MovedAttribute("spam", "six", "six", "u", "u")
six.add_move(attr)
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
attr = six.MovedAttribute("spam", "six", "six", "u", "u")
six.add_move(attr)
from six.moves import spam
assert spam is six.u
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
def test_empty_remove(self):
py.test.raises(AttributeError, six.remove_move, "eggs")
def test_get_unbound_function():
class X(object):
def m(self):
pass
assert six.get_unbound_function(X.m) is X.__dict__["m"]
def test_get_method_self():
class X(object):
def m(self):
pass
x = X()
assert six.get_method_self(x.m) is x
py.test.raises(AttributeError, six.get_method_self, 42)
def test_get_method_function():
class X(object):
def m(self):
pass
x = X()
assert six.get_method_function(x.m) is X.__dict__["m"]
py.test.raises(AttributeError, six.get_method_function, hasattr)
def test_get_function_closure():
def f():
x = 42
def g():
return x
return g
cell = six.get_function_closure(f())[0]
assert type(cell).__name__ == "cell"
def test_get_function_code():
def f():
pass
assert isinstance(six.get_function_code(f), types.CodeType)
if not hasattr(sys, "pypy_version_info"):
py.test.raises(AttributeError, six.get_function_code, hasattr)
def test_get_function_defaults():
def f(x, y=3, b=4):
pass
assert six.get_function_defaults(f) == (3, 4)
def test_get_function_globals():
def f():
pass
assert six.get_function_globals(f) is globals()
def test_dictionary_iterators(monkeypatch):
def stock_method_name(iterwhat):
"""Given a method suffix like "lists" or "values", return the name
of the dict method that delivers those on the version of Python
we're running in."""
if six.PY3:
return iterwhat
return 'iter' + iterwhat
class MyDict(dict):
if not six.PY3:
def lists(self, **kw):
return [1, 2, 3]
def iterlists(self, **kw):
return iter([1, 2, 3])
f = MyDict.iterlists
del MyDict.iterlists
setattr(MyDict, stock_method_name('lists'), f)
d = MyDict(zip(range(10), reversed(range(10))))
for name in "keys", "values", "items", "lists":
meth = getattr(six, "iter" + name)
it = meth(d)
assert not isinstance(it, list)
assert list(it) == list(getattr(d, name)())
py.test.raises(StopIteration, six.advance_iterator, it)
record = []
def with_kw(*args, **kw):
record.append(kw["kw"])
return old(*args)
old = getattr(MyDict, stock_method_name(name))
monkeypatch.setattr(MyDict, stock_method_name(name), with_kw)
meth(d, kw=42)
assert record == [42]
monkeypatch.undo()
def test_advance_iterator():
assert six.next is six.advance_iterator
l = [1, 2]
it = iter(l)
assert six.next(it) == 1
assert six.next(it) == 2
py.test.raises(StopIteration, six.next, it)
py.test.raises(StopIteration, six.next, it)
def test_iterator():
class myiter(six.Iterator):
def __next__(self):
return 13
assert six.advance_iterator(myiter()) == 13
class myitersub(myiter):
def __next__(self):
return 14
assert six.advance_iterator(myitersub()) == 14
def test_callable():
class X:
def __call__(self):
pass
def method(self):
pass
assert six.callable(X)
assert six.callable(X())
assert six.callable(test_callable)
assert six.callable(hasattr)
assert six.callable(X.method)
assert six.callable(X().method)
assert not six.callable(4)
assert not six.callable("string")
def test_create_bound_method():
class X(object):
pass
def f(self):
return self
x = X()
b = six.create_bound_method(f, x)
assert isinstance(b, types.MethodType)
assert b() is x
if six.PY3:
def test_b():
data = six.b("\xff")
assert isinstance(data, bytes)
assert len(data) == 1
assert data == bytes([255])
def test_u():
s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
assert isinstance(s, str)
assert s == "hi \u0439 \U00000439 \\ \\\\ \n"
else:
def test_b():
data = six.b("\xff")
assert isinstance(data, str)
assert len(data) == 1
assert data == "\xff"
def test_u():
s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
assert isinstance(s, unicode)
assert s == "hi \xd0\xb9 \xd0\xb9 \\ \\\\ \n".decode("utf8")
def test_u_escapes():
s = six.u("\u1234")
assert len(s) == 1
def test_unichr():
assert six.u("\u1234") == six.unichr(0x1234)
assert type(six.u("\u1234")) is type(six.unichr(0x1234))
def test_int2byte():
assert six.int2byte(3) == six.b("\x03")
py.test.raises((OverflowError, ValueError), six.int2byte, 256)
def test_byte2int():
assert six.byte2int(six.b("\x03")) == 3
assert six.byte2int(six.b("\x03\x04")) == 3
py.test.raises(IndexError, six.byte2int, six.b(""))
def test_bytesindex():
assert six.indexbytes(six.b("hello"), 3) == ord("l")
def test_bytesiter():
it = six.iterbytes(six.b("hi"))
assert six.next(it) == ord("h")
assert six.next(it) == ord("i")
py.test.raises(StopIteration, six.next, it)
def test_StringIO():
fp = six.StringIO()
fp.write(six.u("hello"))
assert fp.getvalue() == six.u("hello")
def test_BytesIO():
fp = six.BytesIO()
fp.write(six.b("hello"))
assert fp.getvalue() == six.b("hello")
def test_exec_():
def f():
l = []
six.exec_("l.append(1)")
assert l == [1]
f()
ns = {}
six.exec_("x = 42", ns)
assert ns["x"] == 42
glob = {}
loc = {}
six.exec_("global y; y = 42; x = 12", glob, loc)
assert glob["y"] == 42
assert "x" not in glob
assert loc["x"] == 12
assert "y" not in loc
def test_reraise():
def get_next(tb):
if six.PY3:
return tb.tb_next.tb_next
else:
return tb.tb_next
e = Exception("blah")
try:
raise e
except Exception:
tp, val, tb = sys.exc_info()
try:
six.reraise(tp, val, tb)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert tb is get_next(tb2)
try:
six.reraise(tp, val)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert tb2 is not tb
try:
six.reraise(tp, val, tb2)
except Exception:
tp2, value2, tb3 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert get_next(tb3) is tb2
try:
six.reraise(tp, None, tb)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is not val
assert isinstance(value2, Exception)
assert tb is get_next(tb2)
def test_print_():
save = sys.stdout
out = sys.stdout = six.moves.StringIO()
try:
six.print_("Hello,", "person!")
finally:
sys.stdout = save
assert out.getvalue() == "Hello, person!\n"
out = six.StringIO()
six.print_("Hello,", "person!", file=out)
assert out.getvalue() == "Hello, person!\n"
out = six.StringIO()
six.print_("Hello,", "person!", file=out, end="")
assert out.getvalue() == "Hello, person!"
out = six.StringIO()
six.print_("Hello,", "person!", file=out, sep="X")
assert out.getvalue() == "Hello,Xperson!\n"
out = six.StringIO()
six.print_(six.u("Hello,"), six.u("person!"), file=out)
result = out.getvalue()
assert isinstance(result, six.text_type)
assert result == six.u("Hello, person!\n")
six.print_("Hello", file=None) # This works.
out = six.StringIO()
six.print_(None, file=out)
assert out.getvalue() == "None\n"
@py.test.mark.skipif("sys.version_info[:2] >= (2, 6)")
def test_print_encoding(monkeypatch):
# Fool the type checking in print_.
monkeypatch.setattr(six, "file", six.BytesIO, raising=False)
out = six.BytesIO()
out.encoding = "utf-8"
out.errors = None
six.print_(six.u("\u053c"), end="", file=out)
assert out.getvalue() == six.b("\xd4\xbc")
out = six.BytesIO()
out.encoding = "ascii"
out.errors = "strict"
py.test.raises(UnicodeEncodeError, six.print_, six.u("\u053c"), file=out)
out.errors = "backslashreplace"
six.print_(six.u("\u053c"), end="", file=out)
assert out.getvalue() == six.b("\\u053c")
def test_print_exceptions():
py.test.raises(TypeError, six.print_, x=3)
py.test.raises(TypeError, six.print_, end=3)
py.test.raises(TypeError, six.print_, sep=42)
def test_with_metaclass():
class Meta(type):
pass
class X(six.with_metaclass(Meta)):
pass
assert type(X) is Meta
assert issubclass(X, object)
class Base(object):
pass
class X(six.with_metaclass(Meta, Base)):
pass
assert type(X) is Meta
assert issubclass(X, Base)
class Base2(object):
pass
class X(six.with_metaclass(Meta, Base, Base2)):
pass
assert type(X) is Meta
assert issubclass(X, Base)
assert issubclass(X, Base2)
assert X.__mro__ == (X, Base, Base2, object)
def test_wraps():
def f(g):
@six.wraps(g)
def w():
return 42
return w
def k():
pass
original_k = k
k = f(f(k))
assert hasattr(k, '__wrapped__')
k = k.__wrapped__
assert hasattr(k, '__wrapped__')
k = k.__wrapped__
assert k is original_k
assert not hasattr(k, '__wrapped__')
def test_add_metaclass():
class Meta(type):
pass
class X:
"success"
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, object)
assert X.__module__ == __name__
assert X.__doc__ == "success"
class Base(object):
pass
class X(Base):
pass
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, Base)
class Base2(object):
pass
class X(Base, Base2):
pass
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, Base)
assert issubclass(X, Base2)
# Test a second-generation subclass of a type.
class Meta1(type):
m1 = "m1"
class Meta2(Meta1):
m2 = "m2"
class Base:
b = "b"
Base = six.add_metaclass(Meta1)(Base)
class X(Base):
x = "x"
X = six.add_metaclass(Meta2)(X)
assert type(X) is Meta2
assert issubclass(X, Base)
assert type(Base) is Meta1
assert "__dict__" not in vars(X)
instance = X()
instance.attr = "test"
assert vars(instance) == {"attr": "test"}
assert instance.b == Base.b
assert instance.x == X.x
# Test a class with slots.
class MySlots(object):
__slots__ = ["a", "b"]
MySlots = six.add_metaclass(Meta1)(MySlots)
assert MySlots.__slots__ == ["a", "b"]
instance = MySlots()
instance.a = "foo"
py.test.raises(AttributeError, setattr, instance, "c", "baz")
# Test a class with string for slots.
class MyStringSlots(object):
__slots__ = "ab"
MyStringSlots = six.add_metaclass(Meta1)(MyStringSlots)
assert MyStringSlots.__slots__ == "ab"
instance = MyStringSlots()
instance.ab = "foo"
py.test.raises(AttributeError, setattr, instance, "a", "baz")
py.test.raises(AttributeError, setattr, instance, "b", "baz")
class MySlotsWeakref(object):
__slots__ = "__weakref__",
MySlotsWeakref = six.add_metaclass(Meta)(MySlotsWeakref)
assert type(MySlotsWeakref) is Meta
| SoItGoes33/hellowebapp | refs/heads/master | pycon-tutorial/pycon-project/collection/tests.py | 24123 |
from django.test import TestCase
# Create your tests here.
| jackTheRipper/iotrussia | refs/heads/master | web_server/src/server/client/tests.py | 24123 |
from django.test import TestCase
# Create your tests here.
| cherrygirl/micronaet7 | refs/heads/master | accounting_loan/loan.py | 1 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import netsvc
import logging
from openerp.osv import osv, orm, fields
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class loan_header(orm.Model):
''' Header of Loan element
'''
_name = 'loan.header'
_description = 'Account loan'
# -------------
# Button event:
# -------------
def generate_plan(self, cr, uid, ids, context=None):
''' Generate plan of rates
'''
        # Delete previous rates
rate_pool = self.pool.get("loan.rate")
rate_ids = rate_pool.search(cr, uid, [
('loan_id', '=', ids[0]),
#('rate_type', '=', 'normal'),
], context=context)
rate_pool.unlink(cr, uid, rate_ids, context=context)
loan_proxy = self.browse(cr, uid, ids, context=context)[0]
        # More readable values:
C = loan_proxy.loan_amount
i = loan_proxy.rate / 100.0
n = loan_proxy.period
# Calculated value:
R = C * i / (1.0 - (1.0 + i) ** (-n))
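        # Editor's note: R is the standard French amortization (constant
        # installment) formula, R = C*i / (1 - (1+i)**-n); e.g. C=10000,
        # i=0.01 per period and n=12 gives R ~= 888.49 per period.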
Res = C # Initial capital (for remain capital)
start_date = datetime.strptime(
loan_proxy.start_date, DEFAULT_SERVER_DATE_FORMAT)
if loan_proxy.loan_period == 'month':
month_x_period = 1
elif loan_proxy.loan_period == 'bimestral':
month_x_period = 2
elif loan_proxy.loan_period == 'trimestral':
month_x_period = 3
elif loan_proxy.loan_period == 'quadrimestral':
month_x_period = 4
elif loan_proxy.loan_period == 'semestral':
month_x_period = 6
elif loan_proxy.loan_period == 'year':
month_x_period = 12
for period in range(0, n):
current_date = start_date + relativedelta(
months=month_x_period * period)
rate_date = current_date.strftime(DEFAULT_SERVER_DATE_FORMAT)
I = Res * i
Res -= (R - I)
rate_pool.create(cr, uid, {
'name': period + 1,
'loan_id': loan_proxy.id,
'rate_date': rate_date,
            'currency_date': rate_date, # Depends on bank
'rate_amount': R,
'rate_type': 'normal',
'capital': R - I, # (Rate - Interest)
'interest': I,
'remain': Res,
'rate': loan_proxy.rate,
}, context=context)
return True
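    # Editor's note: by construction the 'capital' column of the generated
    # plan sums to C and the final 'remain' approaches zero, which makes a
    # quick sanity check after generate_plan runs.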
# ----------------
# Workflow method:
# ----------------
def wkf_loan_draft(self, cr, uid, ids, context=None):
''' State function for draft
'''
self.write(cr, uid, ids, {
'state': 'draft', }, context=context)
return True
def wkf_loan_confirmed(self, cr, uid, ids, context=None):
''' State function for confirmed
'''
self.write(cr, uid, ids, {
'name': self.pool.get('ir.sequence').get(cr, uid, 'loan.header'),
'confirmed_date': datetime.now().strftime(
DEFAULT_SERVER_DATE_FORMAT),
'state': 'confirmed',
}, context=context)
return True
def wkf_loan_approved(self, cr, uid, ids, context=None):
''' State function for approved
'''
self.write(cr, uid, ids, {
'approve_date': datetime.now().strftime(
DEFAULT_SERVER_DATE_FORMAT),
'state': 'approved',
}, context=context)
return True
def wkf_loan_close(self, cr, uid, ids, context=None):
''' State function for close
'''
self.write(cr, uid, ids, {
'close_date': datetime.now().strftime(
DEFAULT_SERVER_DATE_FORMAT),
'state': 'close',
}, context=context)
return True
def wkf_loan_cancel(self, cr, uid, ids, context=None):
''' State function for cancel
'''
self.write(cr, uid, ids, {
'cancel_date': datetime.now().strftime(
DEFAULT_SERVER_DATE_FORMAT),
'state': 'cancel',
}, context=context)
return True
# ---------------
# Field function:
# ---------------
def _calculate_total_header(self, cr, uid, ids, fields, args, context=None):
        ''' Calculate, for the header view, the total interest and the total of capital + interest
'''
res = {}
for loan in self.browse(cr, uid, ids, context=context):
res[loan.id] = {}
interest = 0.0
capital = 0.0
payed = 0.0
for rate in loan.rate_ids:
interest += rate.interest
capital += rate.capital
if rate.state == 'payed':
payed += rate.rate_amount
res[loan.id]['total_interest'] = interest
res[loan.id]['total_amount'] = capital + interest
res[loan.id]['total_payed'] = payed
return res
_columns = {
# Loan description:
'name': fields.char('Ref.', help="Code for reference"), # By workflow
'description': fields.char('Description', size=128,
help="Extra description for loan element"),
'partner_id': fields.many2one('res.partner', 'Partner',
help='Partner that open loan or that is referred to',
required=True),
'bank_id': fields.many2one('res.partner.bank', 'Bank',
help="Bank reference for this loan, depend on partner", ),
        'guarantor_id': fields.many2one('res.partner', 'Guarantor',
            help='Partner that is guarantor for the loan '
                 '(only for statistical purposes)'),
'note': fields.text('Note'),
        # Loan descriptive information:
'method':fields.selection([
('french', 'French'),
('italian', 'Italian'),
('german', 'German'),
('american', 'American'),
('variable', 'Variable duration'), ], 'Method', required=True),
'loan_type':fields.selection([
('early', 'Loan early'),
            ('postponed', 'Loan postponed'), ], 'Loan Type', required=True),
'loan_period':fields.selection([
('month', 'Monthly'),
('bimestral', 'Bimestral'),
('trimestral', 'Trimestral'),
('quadrimestral', 'Quadrimestral'),
('semestral', 'Semestral'),
('year', 'Year'), ],'Loan Period', required=True),
'return_type': fields.selection([
('cash','By Cash'),
('cheque','By Cheque'),
('automatic','Automatic Payment'), ],'Payment Type'),
#'rate_type': fields.selection([
# ('flat','Flat'),
# ('reducing','Reducing'), ],'Rate Type'),
'rate_period': fields.selection([
('annual', 'Annual'),
('match', 'Match with loan period'), ],
'Rate period',
help="If rate is referred to annual value or to the "
"choosen period (in this case is recalculated", ),
# Loan technical data:
'loan_amount': fields.float(
'Capital',
digits=(12, 2),
required=True),
'rate': fields.float(
'Interest rate',
digits=(12, 2),
help="Rate for calculate interess",
required=True),
'period': fields.integer('Periods', required=True),
# Date elements (used in workflow):
'start_date': fields.date('Start Date'),
'request_date': fields.date('Request Date', readonly=True),
'confirmed_date': fields.date('Confirmed Date', readonly=True),
'approve_date': fields.date('Approve Date', readonly=True),
'close_date': fields.date('Close Date', readonly=True),
'cancel_date': fields.date('Cancel Date', readonly=True),
'analytic_account': fields.many2one('account.analytic.account',
type='many2one',
string="Analytic Account",
help="Account for analytic entries",
),
# Calculated fields:
# TODO convert in functions:
'total_interest': fields.function(
_calculate_total_header,
string='Total Interest',
method=True, type='float', digits=(12, 2),
store=False,
multi='total',
readonly=True),
'total_amount': fields.function(
_calculate_total_header,
string='Total amount',
method=True, type='float', digits=(12, 2),
store=False,
multi='total',
readonly=True),
'total_payed': fields.function(
_calculate_total_header,
            string='Total paid',
method=True, type='float', digits=(12, 2),
store=False,
multi='total',
readonly=True),
# end_date function
# Workflow:
'state': fields.selection([
('draft','Draft'), # Draft state, to complete
('confirmed','Confirmed'), # Confirm information
('approved','Approved'), # Approved from responsible
('close','Close'), # End of life of loan
('cancel','Reject'), # Not approved
],'State', readonly=True, select=True),
}
_defaults = {
'start_date': lambda *a: datetime.now().strftime(
DEFAULT_SERVER_DATE_FORMAT),
'request_date': lambda *a: datetime.now().strftime(
DEFAULT_SERVER_DATE_FORMAT),
'loan_type': lambda *x: 'postponed',
'return_type': lambda *a: 'cash',
'method': lambda *a: 'french',
'loan_period': lambda *a: 'month',
'rate_period': lambda *a: 'match',
'state': lambda *a: 'draft',
}
class loan_rate(orm.Model):
''' Rate of Loan element
'''
_name = 'loan.rate'
_description = 'Account loan rate'
_order = 'loan_id,rate_date,name'
# ---------
# Override:
# ---------
def name_get(self, cr, uid, ids, context=None):
        ''' Present the foreign key as "<loan name>.<rate name> [<rate date>]"
'''
if isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
res = []
for record in self.browse(cr, uid, ids, context=context):
name = "%s.%s [%s]" % (
record.loan_id.name,
record.name or "x",
record.rate_date, )
res.append((record.id, name))
return res
# ----------------
# Workflow method:
# ----------------
def wkf_rate_confirmed(self, cr, uid, ids, context=None):
''' State function for confirmed
'''
self.write(cr, uid, ids, {
'state': 'confirmed', }, context=context)
return True
def wkf_rate_payed(self, cr, uid, ids, context=None):
        ''' State function for payed
'''
self.write(cr, uid, ids, {
'state': 'payed',
'pay_date': datetime.now().strftime(
DEFAULT_SERVER_DATE_FORMAT),
# pay_amount
}, context=context)
return True
def wkf_rate_cancel(self, cr, uid, ids, context=None):
        ''' State function for cancel
'''
self.write(cr, uid, ids, {
'state': 'cancel',
'cancel_date': datetime.now().strftime(
DEFAULT_SERVER_DATE_FORMAT),
}, context=context)
return True
_columns = {
# Loan description:
'name': fields.integer('Ref.', help="Code for reference for rate"),
# Operation date:
'rate_date': fields.date('Rate Date'),
'pay_date': fields.date('Pay Date'),
'cancel_date': fields.date('Cancel Date'),
'currency_date': fields.date('Currency Date'),
'rate_amount': fields.float('Amount', digits=(12, 2)),
'pay_amount': fields.float('Pay amount', digits=(12, 2),
help="If import is different from correct rate"),
# Rate import information:
'capital': fields.float('Capital', digits=(12, 2), readonly=True),
'interest': fields.float('Interest', digits=(12, 2), readonly=True),
'remain': fields.float('Remain', digits=(12, 2), readonly=True),
        # Rate type:
'rate_type': fields.selection([
('normal', 'Normal'), # Normal calculated rate
('integration', 'Integration'), # Integration rate (for interest)
],'Rate type', readonly=True, select=True),
'rate': fields.float(
'Rate applied',
digits=(12, 2),
help="Rate applied (changeable by wizard via header)",
required=True),
'loan_id': fields.many2one('loan.header', 'Loan',
ondelete='cascade'),
# Related form header:
'partner_id': fields.related(
'loan_id',
'partner_id',
type='many2one',
relation='res.partner',
string='Partner',
store=True),
'bank_id': fields.related(
'loan_id',
'bank_id',
type='many2one',
relation='res.partner.bank',
string='Bank',
store=True),
'guarantor_id': fields.related(
'loan_id',
'guarantor_id',
type='many2one',
relation='res.partner',
string='Guarantor',
store=True),
'analytic_account': fields.related(
'loan_id',
'analytic_account',
type='many2one',
relation='account.analytic.account',
            string='Analytic account',
store=True),
# Workflow:
'state': fields.selection([
('confirmed','Confirmed'), # Confirmed (on create)
            ('payed','Paid'), # Paid (close rate)
('cancel','Cancel'), # Cancel (for deletion use)
],'State', readonly=True, select=True),
}
_defaults = {
'rate_type': lambda *x: 'integration',
'state': lambda *x: 'confirmed',
}
class loan_header_rate(orm.Model):
    ''' Rate change element for a loan header
        (rate changes loaded from the wizard)
'''
_name = 'loan.header.rate'
_description = 'Loan change rates'
_rec_name = 'rate'
_order = 'rate_id'
_columns = {
'rate': fields.float('Rate', digits=(12, 2)),
'rate_id': fields.many2one('loan.rate', 'From rate #',
ondelete='cascade'),
'rate_period': fields.selection([
('annual', 'Annual'),
('match', 'Match with loan period'), ],
'Rate period',
help="If rate is referred to annual value or to the "
"choosen period (in this case is recalculated", ),
'loan_id': fields.many2one('loan.header', 'Loan',
ondelete='cascade'),
'note': fields.text('Note'),
}
_sql_constraints = [(
'load_header_rate_change_unique',
'unique(loan_id,rate_id)',
        'Change rate already set for this period',
)]
class loan_header(orm.Model):
    ''' Extension of the loan header adding rate relations
'''
_name = 'loan.header'
_inherit = 'loan.header'
_columns = {
'rate_ids': fields.one2many('loan.rate', 'loan_id', 'Plan rate',),
'rate_change_ids': fields.one2many('loan.header.rate', 'loan_id',
'Change of rate', ),
}
class res_partner_loan(orm.Model):
    ''' Add extra loan relation to the partner object
'''
_name = 'res.partner'
_inherit = 'res.partner'
_columns = {
'loan_ids': fields.one2many('loan.header', 'partner_id', 'Loan'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| bmihelac/django-shop | refs/heads/master | shop/tests/__init__.py | 5 |
# flake8: noqa
from api import ShopApiTestCase
from cart import CartTestCase
from cart_modifiers import (
CartModifiersTestCase,
TenPercentPerItemTaxModifierTestCase,
)
from order import (
OrderConversionTestCase,
OrderPaymentTestCase,
OrderTestCase,
OrderUtilTestCase,
)
from forms import (
CartItemModelFormTestCase,
GetCartItemFormsetTestCase,
)
from payment import PayOnDeliveryTestCase, GeneralPaymentBackendTestCase
from product import ProductTestCase, ProductStatisticsTestCase
from shipping import (
FlatRateShippingTestCase,
GeneralShippingBackendTestCase,
ShippingApiTestCase,
)
from templatetags import ProductsTestCase
from util import (
AddressUtilTestCase,
CartUtilsTestCase,
CurrencyFieldTestCase,
LoaderTestCase,
ModelImportTestCase,
CircularImportTestCase,
)
from views import (
CartDetailsViewTestCase,
CartViewTestCase,
OrderListViewTestCase,
ProductDetailViewTestCase,
)
from views_checkout import (
CheckoutCartToOrderTestCase,
ShippingBillingViewOrderStuffTestCase,
ShippingBillingViewTestCase,
ThankYouViewTestCase,
)
| nkgilley/home-assistant | refs/heads/dev | homeassistant/components/homekit/img_util.py | 2 |
"""Image processing for HomeKit component."""
import logging
SUPPORTED_SCALING_FACTORS = [(7, 8), (3, 4), (5, 8), (1, 2), (3, 8), (1, 4), (1, 8)]
_LOGGER = logging.getLogger(__name__)
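# The factors above are ordered largest to smallest, so the loop below picks
# the largest supported factor that does not exceed the requested ratio
# (e.g. a requested ratio of 0.6 falls through to (1, 2)).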
def scale_jpeg_camera_image(cam_image, width, height):
"""Scale a camera image as close as possible to one of the supported scaling factors."""
turbo_jpeg = TurboJPEGSingleton.instance()
if not turbo_jpeg:
return cam_image.content
(current_width, current_height, _, _) = turbo_jpeg.decode_header(cam_image.content)
if current_width <= width or current_height <= height:
return cam_image.content
ratio = width / current_width
scaling_factor = SUPPORTED_SCALING_FACTORS[-1]
for supported_sf in SUPPORTED_SCALING_FACTORS:
if ratio >= (supported_sf[0] / supported_sf[1]):
scaling_factor = supported_sf
break
return turbo_jpeg.scale_with_quality(
cam_image.content, scaling_factor=scaling_factor, quality=75,
)
class TurboJPEGSingleton:
"""
Load TurboJPEG only once.
Ensures we do not log load failures each snapshot
since camera image fetches happen every few
seconds.
"""
__instance = None
@staticmethod
def instance():
"""Singleton for TurboJPEG."""
if TurboJPEGSingleton.__instance is None:
TurboJPEGSingleton()
return TurboJPEGSingleton.__instance
def __init__(self):
"""Try to create TurboJPEG only once."""
try:
# TurboJPEG checks for libturbojpeg
# when its created, but it imports
# numpy which may or may not work so
# we have to guard the import here.
from turbojpeg import TurboJPEG # pylint: disable=import-outside-toplevel
TurboJPEGSingleton.__instance = TurboJPEG()
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"libturbojpeg is not installed, cameras may impact HomeKit performance."
)
TurboJPEGSingleton.__instance = False
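# Usage note: TurboJPEGSingleton.instance() returns the shared TurboJPEG
# object, or False after a failed load, so callers such as
# scale_jpeg_camera_image above can skip scaling without logging again.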
| firebase/grpc | refs/heads/master | src/python/grpcio_testing/testing_commands.py | 18 |
# Copyright 2018 gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides distutils command classes for the GRPC Python setup process."""
import os
import shutil
import setuptools
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
LICENSE = os.path.join(ROOT_DIR, '../../../LICENSE')
class Preprocess(setuptools.Command):
"""Command to copy LICENSE from root directory."""
description = ''
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if os.path.isfile(LICENSE):
shutil.copyfile(LICENSE, os.path.join(ROOT_DIR, 'LICENSE'))
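# Typical wiring (an assumption, not shown in this file): register the command
# in setup.py via setuptools.setup(..., cmdclass={'preprocess': Preprocess})
# so that `python setup.py preprocess` copies the LICENSE into place.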
| Edu-Glez/Bank_sentiment_analysis | refs/heads/master | env/lib/python3.6/site-packages/numpy/distutils/npy_pkg_config.py | 66 |
from __future__ import division, absolute_import, print_function
import sys
import re
import os
if sys.version_info[0] < 3:
from ConfigParser import RawConfigParser, NoOptionError
else:
from configparser import RawConfigParser, NoOptionError
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
'read_config', 'parse_flags']
_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
class FormatError(IOError):
"""
Exception thrown when there is a problem parsing a configuration file.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class PkgNotFound(IOError):
"""Exception raised when a package can not be located."""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def parse_flags(line):
"""
Parse a line from a config file containing compile flags.
Parameters
----------
line : str
A single line containing one or more compile flags.
Returns
-------
d : dict
Dictionary of parsed flags, split into relevant categories.
These categories are the keys of `d`:
* 'include_dirs'
* 'library_dirs'
* 'libraries'
* 'macros'
* 'ignored'
"""
d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
'macros': [], 'ignored': []}
flags = (' ' + line).split(' -')
for flag in flags:
flag = '-' + flag
if len(flag) > 0:
if flag.startswith('-I'):
d['include_dirs'].append(flag[2:].strip())
elif flag.startswith('-L'):
d['library_dirs'].append(flag[2:].strip())
elif flag.startswith('-l'):
d['libraries'].append(flag[2:].strip())
elif flag.startswith('-D'):
d['macros'].append(flag[2:].strip())
else:
d['ignored'].append(flag)
return d
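# Example (editor's sketch): parse_flags('-I/usr/include -lm -DNDEBUG') returns
# {'include_dirs': ['/usr/include'], 'library_dirs': [], 'libraries': ['m'],
#  'macros': ['NDEBUG'], 'ignored': ['-']}; the stray '-' in 'ignored' comes
# from the leading-space split trick above.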
def _escape_backslash(val):
return val.replace('\\', '\\\\')
class LibraryInfo(object):
"""
Object containing build information about a library.
Parameters
----------
name : str
The library name.
description : str
Description of the library.
version : str
Version string.
sections : dict
The sections of the configuration file for the library. The keys are
the section headers, the values the text under each header.
vars : class instance
A `VariableSet` instance, which contains ``(name, value)`` pairs for
variables defined in the configuration file for the library.
requires : sequence, optional
The required libraries for the library to be installed.
Notes
-----
All input parameters (except "sections" which is a method) are available as
attributes of the same name.
"""
def __init__(self, name, description, version, sections, vars, requires=None):
self.name = name
self.description = description
if requires:
self.requires = requires
else:
self.requires = []
self.version = version
self._sections = sections
self.vars = vars
def sections(self):
"""
Return the section headers of the config file.
Parameters
----------
None
Returns
-------
keys : list of str
The list of section headers.
"""
return list(self._sections.keys())
def cflags(self, section="default"):
val = self.vars.interpolate(self._sections[section]['cflags'])
return _escape_backslash(val)
def libs(self, section="default"):
val = self.vars.interpolate(self._sections[section]['libs'])
return _escape_backslash(val)
def __str__(self):
m = ['Name: %s' % self.name, 'Description: %s' % self.description]
        if self.requires:
            m.append('Requires: %s' % ",".join(self.requires))
        else:
            m.append('Requires:')
m.append('Version: %s' % self.version)
return "\n".join(m)
class VariableSet(object):
"""
Container object for the variables defined in a config file.
`VariableSet` can be used as a plain dictionary, with the variable names
as keys.
Parameters
----------
d : dict
Dict of items in the "variables" section of the configuration file.
"""
def __init__(self, d):
self._raw_data = dict([(k, v) for k, v in d.items()])
self._re = {}
self._re_sub = {}
self._init_parse()
def _init_parse(self):
for k, v in self._raw_data.items():
self._init_parse_var(k, v)
def _init_parse_var(self, name, value):
self._re[name] = re.compile(r'\$\{%s\}' % name)
self._re_sub[name] = value
def interpolate(self, value):
# Brute force: we keep interpolating until there is no '${var}' anymore
# or until interpolated string is equal to input string
def _interpolate(value):
for k in self._re.keys():
value = self._re[k].sub(self._re_sub[k], value)
return value
while _VAR.search(value):
nvalue = _interpolate(value)
if nvalue == value:
break
value = nvalue
return value
def variables(self):
"""
Return the list of variable names.
Parameters
----------
None
Returns
-------
names : list of str
The names of all variables in the `VariableSet` instance.
"""
return list(self._raw_data.keys())
# Emulate a dict to set/get variables values
def __getitem__(self, name):
return self._raw_data[name]
def __setitem__(self, name, value):
self._raw_data[name] = value
self._init_parse_var(name, value)
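# Example (editor's sketch) of interpolation:
#   vs = VariableSet({'prefix': '/usr', 'libdir': '${prefix}/lib'})
#   vs.interpolate('${libdir}/libfoo.a')  # -> '/usr/lib/libfoo.a'
# Substitution recurses until no '${var}' pattern remains (or a fixed point).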
def parse_meta(config):
if not config.has_section('meta'):
raise FormatError("No meta section found !")
d = {}
for name, value in config.items('meta'):
d[name] = value
for k in ['name', 'description', 'version']:
if not k in d:
raise FormatError("Option %s (section [meta]) is mandatory, "
"but not found" % k)
if not 'requires' in d:
d['requires'] = []
return d
def parse_variables(config):
if not config.has_section('variables'):
raise FormatError("No variables section found !")
d = {}
for name, value in config.items("variables"):
d[name] = value
return VariableSet(d)
def parse_sections(config):
    # 'meta_d' and 'r' were undefined here (dead code); return plain sections.
    return dict((s, dict(config.items(s)))
                for s in config.sections() if s not in ('meta', 'variables'))
def pkg_to_filename(pkg_name):
return "%s.ini" % pkg_name
def parse_config(filename, dirs=None):
if dirs:
filenames = [os.path.join(d, filename) for d in dirs]
else:
filenames = [filename]
config = RawConfigParser()
n = config.read(filenames)
if not len(n) >= 1:
raise PkgNotFound("Could not find file(s) %s" % str(filenames))
# Parse meta and variables sections
meta = parse_meta(config)
vars = {}
if config.has_section('variables'):
for name, value in config.items("variables"):
vars[name] = _escape_backslash(value)
# Parse "normal" sections
secs = [s for s in config.sections() if not s in ['meta', 'variables']]
sections = {}
requires = {}
for s in secs:
d = {}
if config.has_option(s, "requires"):
requires[s] = config.get(s, 'requires')
for name, value in config.items(s):
d[name] = value
sections[s] = d
return meta, vars, sections, requires
def _read_config_imp(filenames, dirs=None):
def _read_config(f):
meta, vars, sections, reqs = parse_config(f, dirs)
# recursively add sections and variables of required libraries
for rname, rvalue in reqs.items():
nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
# Update var dict for variables not in 'top' config file
for k, v in nvars.items():
if not k in vars:
vars[k] = v
# Update sec dict
for oname, ovalue in nsections[rname].items():
if ovalue:
sections[rname][oname] += ' %s' % ovalue
return meta, vars, sections, reqs
meta, vars, sections, reqs = _read_config(filenames)
# FIXME: document this. If pkgname is defined in the variables section, and
# there is no pkgdir variable defined, pkgdir is automatically defined to
# the path of pkgname. This requires the package to be imported to work
if not 'pkgdir' in vars and "pkgname" in vars:
pkgname = vars["pkgname"]
if not pkgname in sys.modules:
raise ValueError("You should import %s to get information on %s" %
(pkgname, meta["name"]))
mod = sys.modules[pkgname]
vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
return LibraryInfo(name=meta["name"], description=meta["description"],
version=meta["version"], sections=sections, vars=VariableSet(vars))
# Trivial cache to cache LibraryInfo instances creation. To be really
# efficient, the cache should be handled in read_config, since a same file can
# be parsed many time outside LibraryInfo creation, but I doubt this will be a
# problem in practice
_CACHE = {}
def read_config(pkgname, dirs=None):
"""
Return library info for a package from its configuration file.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of directories - usually including
the NumPy base directory - where to look for npy-pkg-config files.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
misc_util.get_info, misc_util.get_pkg_info
Examples
--------
>>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
>>> type(npymath_info)
<class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
>>> print(npymath_info)
Name: npymath
Description: Portable, core math library implementing C99 standard
Requires:
Version: 0.1 #random
"""
try:
return _CACHE[pkgname]
except KeyError:
v = _read_config_imp(pkg_to_filename(pkgname), dirs)
_CACHE[pkgname] = v
return v
# TODO:
# - implements version comparison (modversion + atleast)
# pkg-config simple emulator - useful for debugging, and maybe later to query
# the system
if __name__ == '__main__':
import sys
from optparse import OptionParser
import glob
parser = OptionParser()
parser.add_option("--cflags", dest="cflags", action="store_true",
help="output all preprocessor and compiler flags")
parser.add_option("--libs", dest="libs", action="store_true",
help="output all linker flags")
parser.add_option("--use-section", dest="section",
help="use this section instead of default for options")
parser.add_option("--version", dest="version", action="store_true",
help="output version")
parser.add_option("--atleast-version", dest="min_version",
help="Minimal version")
parser.add_option("--list-all", dest="list_all", action="store_true",
help="Minimal version")
parser.add_option("--define-variable", dest="define_variable",
help="Replace variable with the given value")
(options, args) = parser.parse_args(sys.argv)
if len(args) < 2:
raise ValueError("Expect package name on the command line:")
if options.list_all:
files = glob.glob("*.ini")
for f in files:
info = read_config(f)
print("%s\t%s - %s" % (info.name, info.name, info.description))
pkg_name = args[1]
import os
d = os.environ.get('NPY_PKG_CONFIG_PATH')
if d:
info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
else:
info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
if options.section:
section = options.section
else:
section = "default"
if options.define_variable:
m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
if not m:
raise ValueError("--define-variable option should be of " \
"the form --define-variable=foo=bar")
else:
name = m.group(1)
value = m.group(2)
info.vars[name] = value
if options.cflags:
print(info.cflags(section))
if options.libs:
print(info.libs(section))
if options.version:
print(info.version)
if options.min_version:
print(info.version >= options.min_version)
| danieldmm/minerva | refs/heads/master | models/pytorch2.py | 1 |
# -*- coding: utf-8 -*-
#
# task as translation
# now with epochs, features and embeddings
from __future__ import unicode_literals, print_function, division
import os
import random
import numpy as np
np.warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Torch using", device)
import torchtext.vocab as vocab
CUSTOM_SEED = 42
np.random.seed(CUSTOM_SEED)
from tqdm import tqdm
from collections import Counter
from models.base_model import BaseModel
from models.keyword_features import FeaturesReader, filterOutFeatures, normaliseFeatures, getRootDir
# cell_type=nn.LSTM
cell_type = nn.GRU
MAX_LENGTH = 100
SOS_token = 0
EOS_token = 1
UNK_token = 3
EOS_marker = "#EOS"
import time
import math
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
h = math.floor(m / 60)
m -= h * 60
return '%02d:%02d:%02d' % (h, m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
if percent == 0:
return "? (?)"
es = s / percent
rs = es - s
return '%s ( %s)' % (asMinutes(s), asMinutes(rs))
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS", 3: "UNK"}
        self.n_words = max(self.index2word) + 1  # next free index; len() would collide with UNK at index 3
def addSentence(self, sentence):
assert isinstance(sentence, list)
for word in sentence:
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
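# Example: lang = Lang("out"); lang.addSentence(["extract", "this"]) assigns
# indices 4 and 5 (0, 1 and 3 are reserved for SOS, EOS and UNK).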
######################################################################
# The Seq2Seq Model
# =================
#
# A Recurrent Neural Network, or RNN, is a network that operates on a
# sequence and uses its own output as input for subsequent steps.
#
# A `Sequence to Sequence network <http://arxiv.org/abs/1409.3215>`__, or
# seq2seq network, or `Encoder Decoder
# network <https://arxiv.org/pdf/1406.1078v3.pdf>`__, is a model
# consisting of two RNNs called the encoder and decoder. The encoder reads
# an input sequence and outputs a single vector, and the decoder reads
# that vector to produce an output sequence.
#
# .. figure:: /_static/img/seq-seq-images/seq2seq.png
# :alt:
#
# Unlike sequence prediction with a single RNN, where every input
# corresponds to an output, the seq2seq model frees us from sequence
# length and order, which makes it ideal for translation between two
# languages.
#
# Consider the sentence "Je ne suis pas le chat noir" → "I am not the
# black cat". Most of the words in the input sentence have a direct
# translation in the output sentence, but are in slightly different
# orders, e.g. "chat noir" and "black cat". Because of the "ne/pas"
# construction there is also one more word in the input sentence. It would
# be difficult to produce a correct translation directly from the sequence
# of input words.
#
# With a seq2seq model the encoder creates a single vector which, in the
# ideal case, encodes the "meaning" of the input sequence into a single
# vector — a single point in some N dimensional space of sentences.
#
######################################################################
# The Encoder
# -----------
#
# The encoder of a seq2seq network is a RNN that outputs some value for
# every word from the input sentence. For every input word the encoder
# outputs a vector and a hidden state, and uses the hidden state for the
# next input word.
#
# .. figure:: /_static/img/seq-seq-images/encoder-network.png
# :alt:
#
#
class ExtraFeaturesEncoderRNN(nn.Module):
def __init__(self, vocab_size, hidden_size, lang, num_extra_features):
super(ExtraFeaturesEncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.lang = lang
self.embedding = nn.Embedding(vocab_size, hidden_size)
self.gru = cell_type(hidden_size + num_extra_features, hidden_size)
self.loadWordVectors()
def loadWordVectors(self):
local = "/Users/masterman/NLP/PhD/vectors/glove"
if os.path.isdir(local):
vector_dir = local
else:
vector_dir = "/tmp/tmp-1135029/glove"
self.glove = vocab.GloVe(name='6B', dim=300, cache=vector_dir)
print('Loaded {} words'.format(len(self.glove.itos)))
for word, emb_index in self.lang.word2index.items():
# if the word is in the loaded glove vectors
if word.lower() in self.glove.stoi:
# get the index into the glove vectors
glove_index = self.glove.stoi[word.lower()]
# get the glove vector itself and convert to pytorch structure
# glove_vec = torch.FloatTensor(self.glove.vectors[glove_index], device=device)
# # this only matters if using cuda :)
# if device.startswith("cuda"):
# glove_vec = glove_vec.cuda()
# finally, if net is our network, and emb is the embedding layer:
self.embedding.weight.data[emb_index, :].set_(self.glove.vectors[glove_index])
self.glove = None
def forward(self, input, hidden):
embedded = self.embedding(input[0]).view(1, 1, -1)
output = torch.cat([embedded, input[1].view(1, 1, -1)], dim=2)
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(EncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = cell_type(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# The Decoder
# -----------
#
# The decoder is another RNN that takes the encoder output vector(s) and
# outputs a sequence of words to create the translation.
#
######################################################################
# Attention Decoder
# ^^^^^^^^^^^^^^^^^
#
# If only the context vector is passed between the encoder and decoder,
# that single vector carries the burden of encoding the entire sentence.
#
# Attention allows the decoder network to "focus" on a different part of
# the encoder's outputs for every step of the decoder's own outputs. First
# we calculate a set of *attention weights*. These will be multiplied by
# the encoder output vectors to create a weighted combination. The result
# (called ``attn_applied`` in the code) should contain information about
# that specific part of the input sequence, and thus help the decoder
# choose the right output words.
#
# .. figure:: https://i.imgur.com/1152PYf.png
# :alt:
#
# Calculating the attention weights is done with another feed-forward
# layer ``attn``, using the decoder's input and hidden state as inputs.
# Because there are sentences of all sizes in the training data, to
# actually create and train this layer we have to choose a maximum
# sentence length (input length, for encoder outputs) that it can apply
# to. Sentences of the maximum length will use all the attention weights,
# while shorter sentences will only use the first few.
#
# .. figure:: /_static/img/seq-seq-images/attention-decoder-network.png
# :alt:
#
#
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = cell_type(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_outputs):
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(
self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
attn_applied = torch.bmm(attn_weights.unsqueeze(0),
encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = F.log_softmax(self.out(output[0]), dim=1)
return output, hidden, attn_weights
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# .. note:: There are other forms of attention that work around the length
# limitation by using a relative position approach. Read about "local
# attention" in `Effective Approaches to Attention-based Neural Machine
# Translation <https://arxiv.org/abs/1508.04025>`__.
#
# Training
# ========
#
# Preparing Training Data
# -----------------------
#
# To train, for each pair we will need an input tensor (indexes of the
# words in the input sentence) and target tensor (indexes of the words in
# the target sentence). While creating these vectors we will append the
# EOS token to both sequences.
#
def indexesFromSentence(lang, sentence):
res = []
for word in sentence:
if word in lang.word2index:
res.append(lang.word2index[word])
else:
res.append(UNK_token)
return res
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
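# Continuing the Lang example above: tensorFromSentence(lang, ["extract", "this"])
# yields a column LongTensor of indices [4, 5, 1], the 1 being the appended EOS.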
def getInputTensor(context, input_lang, dict_vectorizer):
text_in = getTextTokens(context)
context = filterOutFeatures(context) # FIXME
features = [dict_vectorizer.transform(t) for t in context["tokens"]]
feat_len = len(features[0])
features_tensors = [torch.tensor(feat, dtype=torch.float, device=device).view(-1, feat_len) for feat in features]
indexes = tensorFromSentence(input_lang, text_in)
input_list = [p for p in zip(indexes, features_tensors)]
return input_list
def getOutputTensor(context, output_lang):
text_out = getTokensToExtract(context)
target_tensor = tensorFromSentence(output_lang, text_out)
return target_tensor
def getTensorsWithFeatures(context, input_lang, output_lang, dict_vectorizer):
return getInputTensor(context, input_lang, dict_vectorizer), getOutputTensor(context, output_lang)
######################################################################
# Training the Model
# ------------------
#
# To train we run the input sentence through the encoder, and keep track
# of every output and the latest hidden state. Then the decoder is given
# the ``<SOS>`` token as its first input, and the last hidden state of the
# encoder as its first hidden state.
#
# "Teacher forcing" is the concept of using the real target outputs as
# each next input, instead of using the decoder's guess as the next input.
# Using teacher forcing causes it to converge faster but `when the trained
# network is exploited, it may exhibit
# instability <http://minds.jacobs-university.de/sites/default/files/uploads/papers/ESNTutorialRev.pdf>`__.
#
# You can observe outputs of teacher-forced networks that read with
# coherent grammar but wander far from the correct translation -
# intuitively it has learned to represent the output grammar and can "pick
# up" the meaning once the teacher tells it the first few words, but it
# has not properly learned how to create the sentence from the translation
# in the first place.
#
# Because of the freedom PyTorch's autograd gives us, we can randomly
# choose to use teacher forcing or not with a simple if statement. Turn
# ``teacher_forcing_ratio`` up to use more of it.
#
teacher_forcing_ratio = 0
def train(input_list, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion,
max_length=MAX_LENGTH):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = min(len(input_list), max_length)
target_length = target_tensor.size(0)
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(
input_list[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device)
decoder_hidden = encoder_hidden
    use_teacher_forcing = random.random() < teacher_forcing_ratio
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
loss += criterion(decoder_output, target_tensor[di])
decoder_input = target_tensor[di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze().detach() # detach from history as input
loss += criterion(decoder_output, target_tensor[di])
if decoder_input.item() == EOS_token:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.item() / target_length
######################################################################
# The whole training process looks like this:
#
# - Start a timer
# - Initialize optimizers and criterion
# - Create set of training pairs
# - Start empty losses array for plotting
#
# Then we call ``train`` many times and occasionally print the progress (%
# of examples, time so far, estimated time) and average loss.
#
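######################################################################
# A minimal sketch of such a loop (a hypothetical ``trainItersSketch``;
# the loop actually used in this script lives in ``TorchModel.trainModel``
# below, which additionally handles feature vectorization and keyboard
# interrupts):
#
def trainItersSketch(encoder, decoder, training_pairs, epochs=1,
                     learning_rate=0.01, print_every=100):
    start = time.time()                                  # start a timer
    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    criterion = nn.NLLLoss()
    plot_losses = []                                     # losses for plotting
    print_loss_total = 0
    for epoch in range(1, epochs + 1):
        for iteration, (input_list, target_tensor) in enumerate(training_pairs, 1):
            loss = train(input_list, target_tensor, encoder, decoder,
                         encoder_optimizer, decoder_optimizer, criterion)
            print_loss_total += loss
            plot_losses.append(loss)
            if iteration % print_every == 0:             # occasionally report progress
                frac = (epoch - 1 + iteration / float(len(training_pairs))) / epochs
                print('%s iter %d avg loss %.4f' % (timeSince(start, frac),
                                                    iteration,
                                                    print_loss_total / print_every))
                print_loss_total = 0
    return plot_losses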
######################################################################
# Plotting results
# ----------------
#
# Plotting is done with matplotlib, using the array of loss values
# ``plot_losses`` saved while training.
#
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np
def showPlot(points, filename):
    fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
plt.savefig(filename, dpi=600)
######################################################################
# Evaluation
# ==========
#
# Evaluation is mostly the same as training, but there are no targets so
# we simply feed the decoder's predictions back to itself for each step.
# Every time it predicts a word we add it to the output string, and if it
# predicts the EOS token we stop there. We also store the decoder's
# attention outputs for display later.
#
def evaluate(encoder,
decoder,
input_list,
output_lang,
max_length=MAX_LENGTH):
"""
Generate the output for a single context
:param encoder:
:param decoder:
:param context:
:param input_lang:
:param output_lang:
:param dict_vectorizer:
:param max_length:
:return:
"""
with torch.no_grad():
# input_list = getInputTensor(context, input_lang, dict_vectorizer)
# input_length = input_tensor.size()[0]
input_length = min(len(input_list), max_length)
encoder_hidden = encoder.initHidden()
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_list[ei],
encoder_hidden)
encoder_outputs[ei] += encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device) # SOS
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
topv, topi = decoder_output.data.topk(1)
if topi.item() == EOS_token:
decoded_words.append(EOS_marker)
break
else:
decoded_words.append(output_lang.index2word[topi.item()])
decoder_input = topi.squeeze().detach()
return decoded_words, decoder_attentions[:di + 1]
######################################################################
# We can evaluate random sentences from the training set and print out the
# input, target, and output to make some subjective quality judgements:
#
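######################################################################
# A minimal sketch (a hypothetical ``evaluateRandomlySketch``; a
# commented-out variant also appears inside ``TorchModel`` below). It
# assumes a fitted ``dict_vectorizer`` and uses the token helpers defined
# further down in this file:
#
def evaluateRandomlySketch(encoder, decoder, contexts, lang, dict_vectorizer, n=10):
    for _ in range(n):
        context = random.choice(contexts)
        input_list = getInputTensor(context, lang, dict_vectorizer)
        print('>', ' '.join(getTextTokens(context)))       # input tokens
        print('=', ' '.join(getTokensToExtract(context)))  # target keywords
        output_words, attentions = evaluate(encoder, decoder, input_list, lang)
        print('<', ' '.join(output_words))                 # model prediction
        print('')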
######################################################################
# Training and Evaluating
# =======================
#
# With all these helper functions in place (it looks like extra work, but
# it makes it easier to run multiple experiments) we can actually
# initialize a network and start training.
#
# Remember that the input sentences were heavily filtered. For this small
# dataset we can use relatively small networks of 256 hidden nodes and a
# single GRU layer. After about 40 minutes on a MacBook CPU we'll get some
# reasonable results.
#
# .. Note::
# If you run this notebook you can train, interrupt the kernel,
# evaluate, and continue training later. Comment out the lines where the
# encoder and decoder are initialized and run ``trainIters`` again.
#
# ======================================
def getTextTokens(context):
tokens = [t["text"].lower() for t in context["tokens"]]
return tokens
def getContextsTextTokens(contexts):
return [getTextTokens(context) for context in contexts]
def getTokensToExtract(context):
return [t[0] for t in context["best_kws"]]
def getTargetTranslations(contexts):
    return [getTokensToExtract(context) for context in contexts]
def measurePR(truth, predictions):
    if len(truth) == 0:
        return 0, 0, 0, 0, 0
tp = fp = fn = 0
for word in predictions:
if word in truth:
tp += 1
else:
fp += 1
for word in truth:
if word not in predictions:
fn += 1
try:
precision = tp / (tp + fp)
except ZeroDivisionError:
precision = 0
try:
recall = tp / (tp + fn)
except ZeroDivisionError:
recall = 0
return precision, recall, tp, (tp + fp), (tp + fn)
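# For example (hypothetical values): measurePR({'a', 'b', 'c'}, ['a', 'x'])
# gives precision 1/2 (tp=1, tp+fp=2) and recall 1/3 (tp=1, tp+fn=3).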
class TorchModel(BaseModel):
def __init__(self, exp_dir, params={},
train_data_filename="feature_data.json.gz",
test_data_filename="feature_data_test.json.gz"):
super(TorchModel, self).__init__(exp_dir, params, train_data_filename, test_data_filename)
self.epochs = params.get("num_epochs", 10)
self.optimizer_class = params.get("optimizer", "SGD")
self.print_every = params.get("print_every", 100)
self.plot_every = params.get("plot_every", 100)
self.learning_rate = params.get("learning_rate", 0.01)
self.hidden_size = params.get("hidden_size", 512)
self.dropout_p = params.get("dropout_p", 0.1)
def augmentSingleContext(self, context):
pass
def processFeatures(self):
self.context_tokens = getContextsTextTokens(self.contexts)
# for context in tqdm(self.contexts, desc="Adding context features"):
# self.augmentSingleContext(context)
normaliseFeatures(self.contexts)
def postProcessLoadedData(self):
self.MAX_CONTEXT_LEN = max([len(x["tokens"]) for x in self.contexts]) + 2
train_val_cutoff = int(.80 * len(self.contexts))
self.training_contexts = self.contexts[:train_val_cutoff]
self.validation_contexts = self.contexts[train_val_cutoff:]
self.X_train = getContextsTextTokens(self.training_contexts)
self.X_val = getContextsTextTokens(self.validation_contexts)
# self.X_train, self.y_train = getTrainTestData(self.training_contexts)
# self.X_val, self.y_val = getTrainTestData(self.validation_contexts)
# self.X_train = matrixFromContextFeatures(self.X_train, self.dict_vectorizer, self.MAX_CONTEXT_LEN)
# self.X_val = matrixFromContextFeatures(self.X_val, self.dict_vectorizer, self.MAX_CONTEXT_LEN)
self.y_train = getTargetTranslations(self.training_contexts)
self.y_val = getTargetTranslations(self.validation_contexts)
self.lang = Lang("input")
# self.output_lang = Lang("output")
for words in self.X_train + self.X_val:
for word in words:
self.lang.addWord(word)
for words in self.y_train + self.y_val:
for word in words:
self.lang.addWord(word)
self.pairs = [p for p in zip(self.X_train, self.y_train)]
def defineModel(self):
print("Creating model...")
        hidden_size = self.hidden_size
self.encoder = ExtraFeaturesEncoderRNN(self.lang.n_words,
hidden_size,
self.lang,
self.num_extra_features).to(device)
self.decoder = AttnDecoderRNN(hidden_size,
self.lang.n_words,
dropout_p=self.dropout_p,
max_length=self.MAX_CONTEXT_LEN).to(device)
def trainModel(self):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
optimizer = getattr(optim, self.optimizer_class)
encoder_optimizer = optimizer(self.encoder.parameters(), lr=self.learning_rate)
decoder_optimizer = optimizer(self.decoder.parameters(), lr=self.learning_rate)
training_pairs = []
for context in tqdm(self.training_contexts, desc="Vectorizing data"):
training_pairs.append(getTensorsWithFeatures(context,
self.lang,
self.lang,
self.dict_vectorizer))
criterion = nn.NLLLoss()
print("Training...")
for epoch in range(1, self.epochs + 1):
interrupted = False
for iteration, training_pair in enumerate(training_pairs):
try:
input_tensor = training_pair[0]
target_tensor = training_pair[1]
loss = train(input_tensor,
target_tensor,
self.encoder,
self.decoder,
encoder_optimizer,
decoder_optimizer,
criterion,
max_length=self.MAX_CONTEXT_LEN)
print_loss_total += loss
plot_loss_total += loss
if iteration % self.print_every == 0:
print_loss_avg = print_loss_total / self.print_every
print_loss_total = 0
                        print('Epoch %d: %s (%d %d%%) %.4f' % (epoch,
                                                               timeSince(start, iteration / float(self.epochs)),
                                                               iteration,
                                                               100.0 * iteration / len(training_pairs),
                                                               print_loss_avg))
if iteration % self.plot_every == 0:
plot_loss_avg = plot_loss_total / self.plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
except KeyboardInterrupt:
print("Training interrupted")
interrupted = True
break
if interrupted:
break
random.shuffle(training_pairs)
showPlot(plot_losses, os.path.join(self.exp_dir, "pytorch_training.png"))
def testModel(self):
print("Testing...")
self.reader = FeaturesReader(self.test_data_filename)
self.testing_contexts = [c for c in self.reader]
self.X_test = [getInputTensor(context, self.lang, self.dict_vectorizer) for context in
self.testing_contexts]
self.y_test = getTargetTranslations(self.testing_contexts)
all_recall = []
all_precision = []
all_tp = []
all_p_sum = []
all_r_sum = []
for index, input_tensor in enumerate(tqdm(self.X_test)):
truth = {t for t in self.y_test[index]}
predictions, attentions = evaluate(self.encoder,
self.decoder,
input_tensor,
self.lang,
max_length=self.MAX_CONTEXT_LEN)
predictions = [p for p in predictions if p != EOS_marker]
predictions = Counter(predictions)
precision, recall, tp, p_sum, r_sum = measurePR(truth, predictions)
all_recall.append(recall)
all_precision.append(precision)
            all_tp.append(tp)
all_p_sum.append(p_sum)
all_r_sum.append(r_sum)
numsamples = float(len(all_recall))
tp = sum(all_tp)
p_sum = sum(all_p_sum)
r_sum = sum(all_r_sum)
overall_recall = sum(all_recall) / numsamples
overall_precision = sum(all_precision) / numsamples
print("Precision %d/%d %0.2f Recall %d/%d %0.2f" % (tp, p_sum, overall_precision,
tp, r_sum, overall_recall))
def plotPerformance(self):
""" Plot model loss and accuracy through epochs. """
pass
def saveModel(self):
model_name = self.__class__.__name__
# def evaluateRandomly(self, n=10):
# for i in range(n):
# pair = random.choice(self.pairs)
# print('>', pair[0])
# print('=', pair[1])
# output_words, attentions = evaluate(self.encoder, self.decoder, pair[0])
# output_sentence = ' '.join(output_words)
# print('<', output_sentence)
# print('')
def stochasticAugmentation(self):
pass
# lens = []
# to_choose = []
# for index, pair in enumerate(self.pairs):
# cur_len = len(pair[1])
# lens.append((index, cur_len))
# for _ in range(cur_len):
# to_choose.append(index)
#
# lens = sorted(lens, key=lambda x: x[1], reverse=True)
# First we fill the list with unique examples, starting with the longest extracted query first
# pairs_list = [self.pairs[p[0]] for p in lens[:self.epochs]]
# remaining = max(0, self.epochs - len(lens))
# If we need more training data, we stochastically pick more training examples by length as above
# random.shuffle(to_choose)
# pairs_list.extend([self.pairs[random.choice(to_choose)] for _ in range(remaining)])
def main(num_epochs=10, reset=False):
params = {
"num_epochs": num_epochs,
"print_every": 100,
# "learning_rate": 0.003,
"learning_rate": 0.01,
# "optimizer": "Adam",
"optimizer": "SGD",
}
exp_dir = os.path.join(getRootDir("aac"), "experiments", "aac_generate_kw_trace")
model = TorchModel(exp_dir, params=params)
model.run()
if __name__ == '__main__':
import plac
plac.call(main)
|
google/citest
|
refs/heads/master
|
tests/base/test_package.py
|
5
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from citest.base import run_all_tests_in_dir
if __name__ == '__main__':
run_all_tests_in_dir()
|
adelomana/30sols
|
refs/heads/master
|
F5.EGRIN/coregulation.detector.py
|
1
|
"""
this script finds which other genes are present in the ribosomal corems that are not ribosomal genes
"""
import sys
# 0. user defined data
corems2GenesFile='/Volumes/omics4tb/alomana/projects/TLR/data/corem/hsa_c2g.txt'
ribosomalGenesFile='/Volumes/omics4tb/alomana/projects/TLR/data/ribosomalGeneNames.txt'
ribosomalCoremsFile='/Volumes/omics4tb/alomana/projects/TLR/data/GREs/classMembership.txt'
# 1. reading files
"""
select ribosomal corems
find all genes
subtract ribosomal genes
"""
# 1.1. reading ribosomal gene names
ribosomalGenes=[]
with open(ribosomalGenesFile,'r') as f:
next(f)
for line in f:
vector=line.split()
ribosomalGenes.append(vector[0])
# 1.2. reading ribosomal corems
ribosomalCorems=[]
with open(ribosomalCoremsFile,'r') as f:
next(f)
for line in f:
vector=line.split()
ribosomalCorems.append(vector[1])
# 1.3. reading all genes of corems
allGenes=[]
with open(corems2GenesFile,'r') as f:
next(f)
for line in f:
vector=line.split()
corem=vector[0]
if corem in ribosomalCorems:
some=vector[1].split(';')
print(some)
for element in some:
if element not in allGenes:
allGenes.append(element)
# 2. analysis
# 2.1. check all ribosomal genes are recapitulated
intersect=list(set(ribosomalGenes) & set(allGenes))
print(len(intersect),len(ribosomalGenes))
# 2.2. find the set of genes that are not ribosomal genes
coregulated=[]
for element in allGenes:
    if element not in ribosomalGenes:
        coregulated.append(element)
print(coregulated,len(coregulated))
|
rbaindourov/v8-inspector
|
refs/heads/master
|
Source/chrome/tools/resources/list_resources_removed_by_repack.py
|
95
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
usage = """%s BUILDTYPE BUILDDIR
BUILDTYPE: either chromium or chrome.
BUILDDIR: The path to the output directory. e.g. relpath/to/out/Release
Prints out (to stdout) the sorted list of resource ids that are marked as
unused during the repacking process in the given build log (via stdin).
Additionally, attempt to print out the name of the resource and the generated
header file that contains the resource.
This script is used to print the list of resources that are not used so that
developers will notice and fix their .grd files.
"""
def GetResourceIdsFromRepackMessage(in_data):
"""Returns sorted set of resource ids that are not used from in_data.
"""
unused_resources = set()
unused_pattern = re.compile(
'RePackFromDataPackStrings Removed Key: (?P<resource_id>[0-9]+)')
for line in in_data:
match = unused_pattern.match(line)
if match:
resource_id = int(match.group('resource_id'))
unused_resources.add(resource_id)
return sorted(unused_resources)
def Main():
if len(sys.argv) != 3:
sys.stderr.write(usage % sys.argv[0])
return 1
build_type = sys.argv[1]
build_dir = sys.argv[2]
if build_type not in ('chromium', 'chrome'):
sys.stderr.write(usage % sys.argv[0])
return 1
generated_output_dir = os.path.join(build_dir, 'gen')
if not os.path.exists(generated_output_dir):
sys.stderr.write('Cannot find gen dir %s' % generated_output_dir)
return 1
if build_type == 'chromium':
excluded_header = 'google_chrome_strings.h'
else:
excluded_header = 'chromium_strings.h'
data_files = []
for root, dirs, files in os.walk(generated_output_dir):
if os.path.basename(root) != 'grit':
continue
header_files = [header for header in files if header.endswith('.h')]
if excluded_header in header_files:
header_files.remove(excluded_header)
data_files.extend([os.path.join(root, header) for header in header_files])
resource_id_to_name_file_map = {}
resource_pattern = re.compile('#define (?P<resource_name>[A-Z0-9_]+).* '
'(?P<resource_id>[0-9]+)$')
for f in data_files:
data = open(f).read()
for line in data.splitlines():
match = resource_pattern.match(line)
if match:
resource_id = int(match.group('resource_id'))
resource_name = match.group('resource_name')
if resource_id in resource_id_to_name_file_map:
          print 'Duplicate:', resource_id
          print (resource_name, f)
          print resource_id_to_name_file_map[resource_id]
          raise Exception('Duplicate resource id: %d' % resource_id)
resource_id_to_name_file_map[resource_id] = (resource_name, f)
unused_resources = GetResourceIdsFromRepackMessage(sys.stdin)
for resource_id in unused_resources:
if resource_id not in resource_id_to_name_file_map:
print 'WARNING: Unknown resource id', resource_id
continue
(resource_name, filename) = resource_id_to_name_file_map[resource_id]
sys.stdout.write('%d: %s in %s\n' % (resource_id, resource_name, filename))
return 0
if __name__ == '__main__':
sys.exit(Main())
|
thread/django-directed-edge
|
refs/heads/master
|
django_directed_edge/utils.py
|
2
|
import collections
from directed_edge import Database
from django.db import models
from django.db.models.base import ModelBase
from . import app_settings
def get_database():
return Database(app_settings.USERNAME, app_settings.PASSWORD)
def ident(obj, pk=None):
"""
Converts a Django model or model instance into a text representation for
storing in Directed Edge.
>>> ident(User)
auth.User
>>> ident(User, 2)
auth.User#2
>>> ident(User.objects.get(pk=2))
auth.User#2
"""
if isinstance(obj, basestring):
return obj
ret = '%s.%s' % (obj._meta.app_label, obj._meta.object_name)
if not isinstance(obj, ModelBase):
if pk is not None:
raise ValueError("Cannot specify a primary key with an instance")
# Object is an instance
ret += '#%d' % obj.pk
if pk is not None:
ret += '#%d' % pk
return ret
def rident(items):
"""
Converts a list of ident()-like values into Django model instances.
Will perform n queries, where 'n' is the number of distinct models referred
to by the items.
"""
model_pks = collections.defaultdict(set)
for item in items:
appmodel, pk = item.split('#', 1)
model_pks[appmodel].add(pk)
instances = {}
for appmodel, pks in model_pks.iteritems():
app, model_name = appmodel.split('.', 1)
model = models.get_model(app, model_name)
if model is None:
continue
instances[appmodel] = model.objects.in_bulk(pks)
result = []
for item in items:
appmodel, pk = item.split('#', 1)
try:
result.append(instances[appmodel][int(pk)])
except KeyError:
pass
return result
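# For example (hypothetical idents): rident(['auth.User#2', 'auth.User#5'])
# performs a single User query and returns the matching instances, silently
# skipping idents whose model or primary key cannot be resolved.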
def to_pks(items):
return [int(x.split('#', 1)[1]) for x in items]
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
cpython/231_test_winsound.py
|
7
|
# Ridiculously simple test of the winsound module for Windows.
import unittest
from test import test_support
import winsound, time
class BeepTest(unittest.TestCase):
def test_errors(self):
self.assertRaises(TypeError, winsound.Beep)
self.assertRaises(ValueError, winsound.Beep, 36, 75)
self.assertRaises(ValueError, winsound.Beep, 32768, 75)
def test_extremes(self):
winsound.Beep(37, 75)
winsound.Beep(32767, 75)
def test_increasingfrequency(self):
for i in xrange(100, 2000, 100):
winsound.Beep(i, 75)
class MessageBeepTest(unittest.TestCase):
def tearDown(self):
time.sleep(0.5)
def test_default(self):
self.assertRaises(TypeError, winsound.MessageBeep, "bad")
self.assertRaises(TypeError, winsound.MessageBeep, 42, 42)
winsound.MessageBeep()
def test_ok(self):
winsound.MessageBeep(winsound.MB_OK)
def test_asterisk(self):
winsound.MessageBeep(winsound.MB_ICONASTERISK)
def test_exclamation(self):
winsound.MessageBeep(winsound.MB_ICONEXCLAMATION)
def test_hand(self):
winsound.MessageBeep(winsound.MB_ICONHAND)
def test_question(self):
winsound.MessageBeep(winsound.MB_ICONQUESTION)
class PlaySoundTest(unittest.TestCase):
def test_errors(self):
self.assertRaises(TypeError, winsound.PlaySound)
self.assertRaises(TypeError, winsound.PlaySound, "bad", "bad")
self.assertRaises(
RuntimeError,
winsound.PlaySound,
"none", winsound.SND_ASYNC | winsound.SND_MEMORY
)
def test_alias_asterisk(self):
winsound.PlaySound('SystemAsterisk', winsound.SND_ALIAS)
def test_alias_exclamation(self):
winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)
def test_alias_exit(self):
winsound.PlaySound('SystemExit', winsound.SND_ALIAS)
def test_alias_hand(self):
winsound.PlaySound('SystemHand', winsound.SND_ALIAS)
def test_alias_question(self):
winsound.PlaySound('SystemQuestion', winsound.SND_ALIAS)
def test_alias_fallback(self):
# This test can't be expected to work on all systems. The MS
# PlaySound() docs say:
#
# If it cannot find the specified sound, PlaySound uses the
# default system event sound entry instead. If the function
# can find neither the system default entry nor the default
# sound, it makes no sound and returns FALSE.
#
# It's known to return FALSE on some real systems.
# winsound.PlaySound('!"$%&/(#+*', winsound.SND_ALIAS)
return
def test_alias_nofallback(self):
try:
winsound.PlaySound(
'!"$%&/(#+*',
winsound.SND_ALIAS | winsound.SND_NODEFAULT
)
except RuntimeError:
pass
def test_stopasync(self):
winsound.PlaySound(
'SystemQuestion',
winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP
)
time.sleep(0.5)
try:
winsound.PlaySound(
'SystemQuestion',
winsound.SND_ALIAS | winsound.SND_NOSTOP
)
except RuntimeError:
pass
else: # the first sound might already be finished
pass
winsound.PlaySound(None, winsound.SND_PURGE)
def test_main():
test_support.run_unittest(BeepTest, MessageBeepTest, PlaySoundTest)
if __name__=="__main__":
test_main()
|
nesterione/experiments-of-programming
|
refs/heads/master
|
Python/db/mongo/test.py
|
1
|
import pymongo
import datetime
from pymongo import MongoClient
from bson.objectid import ObjectId
# The web framework gets post_id from the URL and passes it as a string
def get(post_id):
# Convert from string to ObjectId:
document = client.db.collection.find_one({'_id': ObjectId(post_id)})
################
# Connection
################
# client = MongoClient() connect on the default
# client = MongoClient('localhost', 27017)
# or
client = MongoClient('mongodb://localhost:27017/')
db = client.mydb
# or
#db = client['mydb']
#Getting collections
collection = db.testData
#or
#collection = db['testData']
################
# Data insert
################
post = {
"author": "Igor",
"text": "My first blog post!",
"tags": ["mongo","python", "good"],
"date": datetime.datetime.utcnow()
}
posts = db.posts
post_id = posts.insert(post)
#print(post_id)
print(db.collection_names())
################
# Getting a Single Document With find_one()
################
print(posts.find_one())
# query
print(posts.find_one({"author": "Igor"}))
################
# Querying By ObjectId
################
posts.find_one({"_id":post_id})
new_posts = [{"author": "Mike",
"text": "Another post!",
"tags": ["bulk", "insert"],
"date": datetime.datetime(2009, 11, 12, 11, 14)},
{"author": "Eliot",
"title": "MongoDB is fun",
"text": "and pretty easy too!",
"date": datetime.datetime(2009, 11, 10, 10, 45)}]
#posts.insert(new_posts)
# count documents
print(posts.count())
# get some documents
# get coursor
for post in posts.find():
print(post)
# or
print("*************")
for post in posts.find({"author": "Mike"}):
print(post)
#################
# Range Queries
#################
d = datetime.datetime(2009, 11, 12, 12)
for post in posts.find({"date": {"$lt": d}}).sort("author"):
print(post)
###############
# Indexing
###############
print(posts.find({"date": {"$lt": d}}).sort("author").explain()["cursor"])
print(posts.find({"date": {"$lt": d}}).sort("author").explain()["nscanned"])
from pymongo import ASCENDING, DESCENDING
print(posts.create_index([("date", DESCENDING), ("author", ASCENDING)]))
print(posts.find({"date": {"$lt": d}}).sort("author").explain()["nscanned"])
|
kyleburnett/iris
|
refs/heads/master
|
iris.py
|
1
|
#
# Name: iris.py
# Description: Implementation of an artificial neural network
# for classifying the iris flower types as specified in the
# Iris dataset found at:
#
# http://archive.ics.uci.edu/ml/datasets/Iris
#
# Author(s): Kyle Burnett
#
import random
import math
filename = "iris.data"
basket = []
with open(filename) as ifs:
for line in ifs:
sample = line.strip().split(',')
for i in range(4):
sample[i] = float(sample[i])
basket.append(sample)
# Sigmoid function f(x) = 1 / (1 + e^(-x))
def sigmoid(x):
return 1 / (1 + math.exp(-x))
# Used for propagating signals forward through the network
def forwardfeed(sample, weights):
total = 0.0
for x, weight in zip(sample, weights):
total += x * weight
return sigmoid(total)
# Initialize weights
w = [0.0, 0.0, 0.0, 0.0]
alpha = 0.1
# Note: this sampling loop doesn't prevent duplicates, so the same example
# can be chosen many times, which risks overfitting.
for i in range(100):
# Choose sample
sample = random.choice(basket)
# Feed input data in (feedforward)
sigma = forwardfeed(sample[:-1], w)
# Compute error gradient
if sample[4] == 'Iris-setosa':
# Here the expected value is 1.0
error = sigma * (1.0 - sigma) * (1.0 - sigma)
else:
# Here the expected value is 0.0
error = sigma * (1.0 - sigma) * (0.0 - sigma)
# Update weights
for j in range(4):
w[j] = w[j] + alpha * sample[j] * error
|
SantosDevelopers/sborganicos
|
refs/heads/master
|
venv/lib/python3.5/site-packages/django/utils/translation/__init__.py
|
51
|
"""
Internationalization support.
"""
from __future__ import unicode_literals
import re
import warnings
from django.utils import six
from django.utils.decorators import ContextDecorator
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
from django.utils.functional import lazy
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
USE_I18N will have no effect to which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
def gettext_noop(message):
return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return _trans.gettext(message)
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
def ugettext(message):
return _trans.ugettext(message)
def ungettext(singular, plural, number):
return _trans.ungettext(singular, plural, number)
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
def lazy_number(func, resultclass, number=None, **kwargs):
if isinstance(number, six.integer_types):
kwargs['number'] = number
proxy = lazy(func, resultclass)(**kwargs)
else:
original_kwargs = kwargs.copy()
class NumberAwareString(resultclass):
def __bool__(self):
return bool(kwargs['singular'])
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __mod__(self, rhs):
if isinstance(rhs, dict) and number:
try:
number_value = rhs[number]
except KeyError:
raise KeyError(
"Your dictionary lacks key '%s\'. Please provide "
"it, because it is required to determine whether "
"string is singular or plural." % number
)
else:
number_value = rhs
kwargs['number'] = number_value
translated = func(**kwargs)
try:
translated = translated % rhs
except TypeError:
# String doesn't contain a placeholder for the number
pass
return translated
proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
proxy.__reduce__ = lambda: (_lazy_number_unpickle, (func, resultclass, number, original_kwargs))
return proxy
def _lazy_number_unpickle(func, resultclass, number, kwargs):
return lazy_number(func, resultclass, number=number, **kwargs)
def ngettext_lazy(singular, plural, number=None):
return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
def ungettext_lazy(singular, plural, number=None):
return lazy_number(ungettext, six.text_type, singular=singular, plural=plural, number=number)
def npgettext_lazy(context, singular, plural, number=None):
return lazy_number(npgettext, six.text_type, context=context, singular=singular, plural=plural, number=number)
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(ContextDecorator):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
def __enter__(self):
self.old_language = get_language()
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.old_language is None:
deactivate_all()
elif self.deactivate:
deactivate()
else:
activate(self.old_language)
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_locale(language):
return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def templatize(src, **kwargs):
from .template import templatize
return templatize(src, **kwargs)
def deactivate_all():
return _trans.deactivate_all()
def _string_concat(*strings):
"""
Lazy variant of string concatenation, needed for translations that are
constructed from multiple parts.
"""
warnings.warn(
'django.utils.translate.string_concat() is deprecated in '
'favor of django.utils.text.format_lazy().',
RemovedInDjango21Warning, stacklevel=2)
return ''.join(force_text(s) for s in strings)
string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
lang_info = LANG_INFO[lang_code]
if 'fallback' in lang_info and 'name' not in lang_info:
info = get_language_info(lang_info['fallback'][0])
else:
info = lang_info
except KeyError:
if '-' not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split('-')[0]
try:
info = LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
if info:
info['name_translated'] = ugettext_lazy(info['name'])
return info
trim_whitespace_re = re.compile(r'\s*\n\s*')
def trim_whitespace(s):
return trim_whitespace_re.sub(' ', s.strip())
|
rhinstaller/anaconda
|
refs/heads/master
|
pyanaconda/modules/common/custom_typing.py
|
3
|
#
# Copyright (C) 2020 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from dasbus.typing import * # pylint: disable=wildcard-import
__all__ = ["BusName"]
# Type of a DBus service name.
BusName = Str
|
b0ttl3z/SickRage
|
refs/heads/master
|
lib/mako/cache.py
|
42
|
# mako/cache.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import compat, util
_cache_plugins = util.PluginLoader("mako.cache")
register_plugin = _cache_plugins.register
register_plugin("beaker", "mako.ext.beaker_cache", "BeakerCacheImpl")
class Cache(object):
"""Represents a data content cache made available to the module
space of a specific :class:`.Template` object.
.. versionadded:: 0.6
:class:`.Cache` by itself is mostly a
container for a :class:`.CacheImpl` object, which implements
a fixed API to provide caching services; specific subclasses exist to
implement different
caching strategies. Mako includes a backend that works with
the Beaker caching system. Beaker itself then supports
a number of backends (i.e. file, memory, memcached, etc.)
The construction of a :class:`.Cache` is part of the mechanics
of a :class:`.Template`, and programmatic access to this
cache is typically via the :attr:`.Template.cache` attribute.
"""
impl = None
"""Provide the :class:`.CacheImpl` in use by this :class:`.Cache`.
This accessor allows a :class:`.CacheImpl` with additional
methods beyond that of :class:`.Cache` to be used programmatically.
"""
id = None
"""Return the 'id' that identifies this cache.
This is a value that should be globally unique to the
:class:`.Template` associated with this cache, and can
be used by a caching system to name a local container
for data specific to this template.
"""
starttime = None
"""Epochal time value for when the owning :class:`.Template` was
first compiled.
A cache implementation may wish to invalidate data earlier than
this timestamp; this has the effect of the cache for a specific
:class:`.Template` starting clean any time the :class:`.Template`
is recompiled, such as when the original template file changed on
the filesystem.
"""
def __init__(self, template, *args):
# check for a stale template calling the
# constructor
if isinstance(template, compat.string_types) and args:
return
self.template = template
self.id = template.module.__name__
self.starttime = template.module._modified_time
self._def_regions = {}
self.impl = self._load_impl(self.template.cache_impl)
def _load_impl(self, name):
return _cache_plugins.load(name)(self)
def get_or_create(self, key, creation_function, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value."""
return self._ctx_get_or_create(key, creation_function, None, **kw)
def _ctx_get_or_create(self, key, creation_function, context, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value."""
if not self.template.cache_enabled:
return creation_function()
return self.impl.get_or_create(
key,
creation_function,
**self._get_cache_kw(kw, context))
def set(self, key, value, **kw):
"""Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
"""
self.impl.set(key, value, **self._get_cache_kw(kw, None))
put = set
"""A synonym for :meth:`.Cache.set`.
This is here for backwards compatibility.
"""
def get(self, key, **kw):
"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
return self.impl.get(key, **self._get_cache_kw(kw, None))
def invalidate(self, key, **kw):
"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
self.impl.invalidate(key, **self._get_cache_kw(kw, None))
def invalidate_body(self):
"""Invalidate the cached content of the "body" method for this
template.
"""
self.invalidate('render_body', __M_defname='render_body')
def invalidate_def(self, name):
"""Invalidate the cached content of a particular ``<%def>`` within this
template.
"""
self.invalidate('render_%s' % name, __M_defname='render_%s' % name)
def invalidate_closure(self, name):
"""Invalidate a nested ``<%def>`` within this template.
Caching of nested defs is a blunt tool as there is no
management of scope -- nested defs that use cache tags
need to have names unique of all other nested defs in the
template, else their content will be overwritten by
each other.
"""
self.invalidate(name, __M_defname=name)
def _get_cache_kw(self, kw, context):
defname = kw.pop('__M_defname', None)
if not defname:
tmpl_kw = self.template.cache_args.copy()
tmpl_kw.update(kw)
elif defname in self._def_regions:
tmpl_kw = self._def_regions[defname]
else:
tmpl_kw = self.template.cache_args.copy()
tmpl_kw.update(kw)
self._def_regions[defname] = tmpl_kw
if context and self.impl.pass_context:
tmpl_kw = tmpl_kw.copy()
tmpl_kw.setdefault('context', context)
return tmpl_kw
class CacheImpl(object):
"""Provide a cache implementation for use by :class:`.Cache`."""
def __init__(self, cache):
self.cache = cache
pass_context = False
"""If ``True``, the :class:`.Context` will be passed to
:meth:`get_or_create <.CacheImpl.get_or_create>` as the name ``'context'``.
"""
def get_or_create(self, key, creation_function, **kw):
"""Retrieve a value from the cache, using the given creation function
to generate a new value.
This function *must* return a value, either from
the cache, or via the given creation function.
If the creation function is called, the newly
created value should be populated into the cache
under the given key before being returned.
:param key: the value's key.
:param creation_function: function that when called generates
a new value.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def set(self, key, value, **kw):
"""Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def get(self, key, **kw):
"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
def invalidate(self, key, **kw):
"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments.
"""
raise NotImplementedError()
|
moondrop-entertainment/django-nonrel-drawp
|
refs/heads/master
|
django/contrib/localflavor/at/at_states.py
|
537
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
STATE_CHOICES = (
('BL', _('Burgenland')),
('KA', _('Carinthia')),
('NO', _('Lower Austria')),
('OO', _('Upper Austria')),
('SA', _('Salzburg')),
('ST', _('Styria')),
('TI', _('Tyrol')),
('VO', _('Vorarlberg')),
('WI', _('Vienna')),
)
|
arahuja/scikit-learn
|
refs/heads/master
|
sklearn/gaussian_process/tests/__init__.py
|
12133432
| |
eltonkevani/tempest_el_env
|
refs/heads/master
|
tempest/api/orchestration/stacks/__init__.py
|
12133432
| |
nikhilprathapani/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/plugins/twisted_trial.py
|
122
|
from zope.interface import implements
from twisted.trial.itrial import IReporter
from twisted.plugin import IPlugin
class _Reporter(object):
implements(IPlugin, IReporter)
def __init__(self, name, module, description, longOpt, shortOpt, klass):
self.name = name
self.module = module
self.description = description
self.longOpt = longOpt
self.shortOpt = shortOpt
self.klass = klass
Tree = _Reporter("Tree Reporter",
"twisted.trial.reporter",
description="verbose color output (default reporter)",
longOpt="verbose",
shortOpt="v",
klass="TreeReporter")
BlackAndWhite = _Reporter("Black-And-White Reporter",
"twisted.trial.reporter",
description="Colorless verbose output",
longOpt="bwverbose",
shortOpt="o",
klass="VerboseTextReporter")
Minimal = _Reporter("Minimal Reporter",
"twisted.trial.reporter",
description="minimal summary output",
longOpt="summary",
shortOpt="s",
klass="MinimalReporter")
Classic = _Reporter("Classic Reporter",
"twisted.trial.reporter",
description="terse text output",
longOpt="text",
shortOpt="t",
klass="TextReporter")
Timing = _Reporter("Timing Reporter",
"twisted.trial.reporter",
description="Timing output",
longOpt="timing",
shortOpt=None,
klass="TimingTextReporter")
Subunit = _Reporter("Subunit Reporter",
"twisted.trial.reporter",
description="subunit output",
longOpt="subunit",
shortOpt=None,
klass="SubunitReporter")
|
petroswork/pydantic
|
refs/heads/master
|
benchmarks/test_marshmallow.py
|
1
|
from marshmallow import Schema, fields, validate
class TestMarshmallow:
package = 'marshmallow'
def __init__(self, allow_extra):
class LocationSchema(Schema):
latitude = fields.Float(allow_none=True)
longitude = fields.Float(allow_none=True)
class SkillSchema(Schema):
subject = fields.Str(required=True)
subject_id = fields.Integer(required=True)
category = fields.Str(required=True)
qual_level = fields.Str(required=True)
qual_level_id = fields.Integer(required=True)
qual_level_ranking = fields.Float(default=0)
class Model(Schema):
id = fields.Integer(required=True)
client_name = fields.Str(validate=validate.Length(max=255), required=True)
sort_index = fields.Float(required=True)
#client_email = fields.Email()
client_phone = fields.Str(validate=validate.Length(max=255), allow_none=True)
location = LocationSchema()
contractor = fields.Integer(validate=validate.Range(min=0), allow_none=True)
upstream_http_referrer = fields.Str(validate=validate.Length(max=1023), allow_none=True)
grecaptcha_response = fields.Str(validate=validate.Length(min=20, max=1000), required=True)
last_updated = fields.DateTime(allow_none=True)
skills = fields.Nested(SkillSchema(many=True))
self.allow_extra = allow_extra # unused
self.schema = Model()
def validate(self, data):
result = self.schema.load(data)
if result.errors:
return False, result.errors
else:
return True, result.data
|
hbrunn/OCB
|
refs/heads/8.0
|
openerp/report/print_xml.py
|
338
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import openerp
import openerp.tools as tools
from openerp.tools.safe_eval import safe_eval
import print_fnc
from openerp.osv.orm import BaseModel
class InheritDict(dict):
    # Might be useful when doing name lookup for call or eval.
def __init__(self, parent=None):
self.parent = parent
def __getitem__(self, name):
if name in self:
return super(InheritDict, self).__getitem__(name)
else:
if not self.parent:
raise KeyError
else:
return self.parent[name]
def tounicode(val):
if isinstance(val, str):
unicode_val = unicode(val, 'utf-8')
elif isinstance(val, unicode):
unicode_val = val
else:
unicode_val = unicode(val)
return unicode_val
class document(object):
def __init__(self, cr, uid, datas, func=False):
# create a new document
self.cr = cr
self.pool = openerp.registry(cr.dbname)
self.func = func or {}
self.datas = datas
self.uid = uid
self.bin_datas = {}
def node_attrs_get(self, node):
if len(node.attrib):
return node.attrib
return {}
def get_value(self, browser, field_path):
fields = field_path.split('.')
if not len(fields):
return ''
value = browser
for f in fields:
if isinstance(value, (BaseModel, list)):
if not value:
return ''
value = value[0]
value = value[f]
return value or ''
def get_value2(self, browser, field_path):
value = self.get_value(browser, field_path)
if isinstance(value, BaseModel):
return value.id
else:
return value
def eval(self, record, expr):
#TODO: support remote variables (eg address.title) in expr
# how to do that: parse the string, find dots, replace those dotted variables by temporary
# "simple ones", fetch the value of those variables and add them (temporarily) to the _data
# dictionary passed to eval
        #FIXME: it won't work if the data hasn't been fetched yet... this could
# happen if the eval node is the first one using this Record
# the next line is a workaround for the problem: it causes the resource to be loaded
#Pinky: Why not this ? eval(expr, browser) ?
# name = browser.name
# data_dict = browser._data[self.get_value(browser, 'id')]
return safe_eval(expr, {}, {'obj': record})
def parse_node(self, node, parent, browser, datas=None):
attrs = self.node_attrs_get(node)
if 'type' in attrs:
if attrs['type']=='field':
value = self.get_value(browser, attrs['name'])
#TODO: test this
if value == '' and 'default' in attrs:
value = attrs['default']
el = etree.SubElement(parent, node.tag)
el.text = tounicode(value)
#TODO: test this
for key, value in attrs.iteritems():
if key not in ('type', 'name', 'default'):
el.set(key, value)
elif attrs['type']=='attachment':
model = browser._name
value = self.get_value(browser, attrs['name'])
ids = self.pool['ir.attachment'].search(self.cr, self.uid, [('res_model','=',model),('res_id','=',int(value))])
datas = self.pool['ir.attachment'].read(self.cr, self.uid, ids)
if len(datas):
# if there are several, pick first
datas = datas[0]
fname = str(datas['datas_fname'])
ext = fname.split('.')[-1].lower()
if ext in ('jpg','jpeg', 'png'):
import base64
from StringIO import StringIO
dt = base64.decodestring(datas['datas'])
fp = StringIO()
fp.write(dt)
i = str(len(self.bin_datas))
self.bin_datas[i] = fp
el = etree.SubElement(parent, node.tag)
el.text = i
elif attrs['type']=='data':
#TODO: test this
txt = self.datas.get('form', {}).get(attrs['name'], '')
el = etree.SubElement(parent, node.tag)
el.text = txt
elif attrs['type']=='function':
if attrs['name'] in self.func:
txt = self.func[attrs['name']](node)
else:
txt = print_fnc.print_fnc(attrs['name'], node)
el = etree.SubElement(parent, node.tag)
el.text = txt
elif attrs['type']=='eval':
value = self.eval(browser, attrs['expr'])
el = etree.SubElement(parent, node.tag)
el.text = str(value)
elif attrs['type']=='fields':
fields = attrs['name'].split(',')
vals = {}
for b in browser:
value = tuple([self.get_value2(b, f) for f in fields])
if not value in vals:
vals[value]=[]
vals[value].append(b)
keys = vals.keys()
keys.sort()
if 'order' in attrs and attrs['order']=='desc':
keys.reverse()
v_list = [vals[k] for k in keys]
for v in v_list:
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld, el, v)
elif attrs['type']=='call':
if len(attrs['args']):
#TODO: test this
# fetches the values of the variables which names where passed in the args attribute
args = [self.eval(browser, arg) for arg in attrs['args'].split(',')]
else:
args = []
# get the object
if 'model' in attrs:
obj = self.pool[attrs['model']]
else:
obj = browser # the record(set) is an instance of the model
# get the ids
if 'ids' in attrs:
ids = self.eval(browser, attrs['ids'])
else:
                    ids = browser.ids
# call the method itself
newdatas = getattr(obj, attrs['name'])(self.cr, self.uid, ids, *args)
def parse_result_tree(node, parent, datas):
if not node.tag == etree.Comment:
el = etree.SubElement(parent, node.tag)
atr = self.node_attrs_get(node)
if 'value' in atr:
if not isinstance(datas[atr['value']], (str, unicode)):
txt = str(datas[atr['value']])
else:
txt = datas[atr['value']]
el.text = txt
else:
for el_cld in node:
parse_result_tree(el_cld, el, datas)
if not isinstance(newdatas, (BaseModel, list)):
newdatas = [newdatas]
for newdata in newdatas:
parse_result_tree(node, parent, newdata)
elif attrs['type']=='zoom':
value = self.get_value(browser, attrs['name'])
if value:
if not isinstance(value, (BaseModel, list)):
v_list = [value]
else:
v_list = value
for v in v_list:
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld, el, v)
else:
# if there is no "type" attribute in the node, copy it to the xml data and parse its children
if not node.tag == etree.Comment:
if node.tag == parent.tag:
el = parent
else:
el = etree.SubElement(parent, node.tag)
for el_cld in node:
self.parse_node(el_cld,el, browser)
def xml_get(self):
return etree.tostring(self.doc,encoding="utf-8",xml_declaration=True,pretty_print=True)
def parse_tree(self, ids, model, context=None):
if not context:
context={}
browser = self.pool[model].browse(self.cr, self.uid, ids, context)
self.parse_node(self.dom, self.doc, browser)
def parse_string(self, xml, ids, model, context=None):
if not context:
context={}
# parses the xml template to memory
self.dom = etree.XML(xml)
# create the xml data from the xml template
self.parse_tree(ids, model, context)
def parse(self, filename, ids, model, context=None):
if not context:
context={}
# parses the xml template to memory
src_file = tools.file_open(filename)
try:
self.dom = etree.XML(src_file.read())
self.doc = etree.Element(self.dom.tag)
self.parse_tree(ids, model, context)
finally:
src_file.close()
def close(self):
self.doc = None
self.dom = None
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
caioserra/apiAdwords
|
refs/heads/master
|
examples/adspygoogle/dfp/v201311/line_item_service/get_line_items_by_statement.py
|
2
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all line items that need creatives for the given
order. The statement retrieves up to the maximum page size limit of 500. To
create line items, run create_line_items.py."""
__author__ = ('Jeff Sham',
'Vincent Tsao')
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v201311')
# Set the id of the order to get line items from.
order_id = 'INSERT_ORDER_ID_HERE'
# Create statement object to only select line items that need creatives from a
# given order.
values = [{
'key': 'orderId',
'value': {
'xsi_type': 'NumberValue',
'value': order_id
}
}, {
'key': 'status',
'value': {
'xsi_type': 'TextValue',
'value': 'NEEDS_CREATIVES'
}
}]
filter_statement = {
'query': 'WHERE orderId = :orderId AND status = :status LIMIT 500',
'values': values
}
# Get line items by statement.
response = line_item_service.GetLineItemsByStatement(filter_statement)[0]
line_items = []
if 'results' in response:
line_items = response['results']
# Display results.
for line_item in line_items:
print ('Line item with id \'%s\', belonging to order id \'%s\', and named '
'\'%s\' was found.' % (line_item['id'], line_item['orderId'],
line_item['name']))
print
print 'Number of results found: %s' % len(line_items)
|
micropython/micropython-esp32
|
refs/heads/esp32
|
tests/basics/object1.py
|
110
|
# test builtin object()
# creation
object()
# printing
print(repr(object())[:7])
|
ajs124/namebench
|
refs/heads/master
|
libnamebench/geoip.py
|
171
|
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class used for determining GeoIP location."""
import csv
import re
import tempfile
# external dependencies (from nb_third_party)
import httplib2
import math
import simplejson
import util
def GetFromGoogleLocAPI():
"""Use the Google Loc JSON API from Google Gears.
Returns:
A dictionary containing geolocation information
NOTE: This is in violation of the Gears Terms of Service. See:
http://code.google.com/p/gears/wiki/GeolocationAPI
"""
h = httplib2.Http(tempfile.gettempdir(), timeout=10)
url = 'http://www.google.com/loc/json'
post_data = {'request_address': 'true', 'version': '1.1.0', 'source': 'namebench'}
unused_resp, content = h.request(url, 'POST', simplejson.dumps(post_data))
try:
data = simplejson.loads(content)['location']
return {
'region_name': data['address'].get('region'),
'country_name': data['address'].get('country'),
'country_code': data['address'].get('country_code'),
'city': data['address'].get('city'),
'latitude': data['latitude'],
'longitude': data['longitude'],
'source': 'gloc'
}
except:
print '* Failed to use GoogleLocAPI: %s (content: %s)' % (util.GetLastExceptionString(), content)
return {}
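# On success, GetFromGoogleLocAPI returns a dict shaped like this
# (illustrative values only):
#   {'region_name': 'California', 'country_name': 'United States',
#    'country_code': 'US', 'city': 'Mountain View',
#    'latitude': 37.386, 'longitude': -122.084, 'source': 'gloc'}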
def GetFromMaxmindJSAPI():
h = httplib2.Http(tempfile.gettempdir(), timeout=10)
unused_resp, content = h.request('http://j.maxmind.com/app/geoip.js', 'GET')
keep = ['region_name', 'country_name', 'city', 'latitude', 'longitude', 'country_code']
results = dict([x for x in re.findall("geoip_(.*?)\(.*?\'(.*?)\'", content) if x[0] in keep])
results.update({'source': 'mmind'})
if results:
return results
else:
return {}
def GetGeoData():
"""Get geodata from any means necessary. Sanitize as necessary."""
try:
json_data = GetFromGoogleLocAPI()
if not json_data:
json_data = GetFromMaxmindJSAPI()
    # Round coordinates to three decimal places; we don't need any more
    # precision than that.
json_data['latitude'] = '%.3f' % float(json_data['latitude'])
json_data['longitude'] = '%.3f' % float(json_data['longitude'])
return json_data
except:
print 'Failed to get Geodata: %s' % util.GetLastExceptionString()
return {}
def GetInfoForCountry(country_name_or_code):
"""Get code, name, lat and lon for a given country name or code."""
match = False
partial_match = False
if not country_name_or_code:
return None
if len(country_name_or_code) == 2:
country_code = country_name_or_code.upper()
country_name = False
else:
country_name = country_name_or_code
country_code = False
for row in ReadCountryData():
lat, lon = row['coords'].split(',')
if country_code:
if row['code'] == country_code:
return row['code'], row['name'], lat, lon
elif country_name:
if re.match("^%s$" % country_name, row['name'], re.I):
return row['code'], row['name'], lat, lon
elif re.search('^%s \(' % country_name, row['name'], re.I):
return row['code'], row['name'], lat, lon
elif re.search('\(%s\)' % country_name, row['name'], re.I):
return row['code'], row['name'], lat, lon
elif re.match("^%s" % country_name, row['name'], re.I):
match = (row['code'], row['name'], lat, lon)
elif re.search(country_name, row['name'], re.I):
partial_match = (row['code'], row['name'], lat, lon)
if match:
print "Could not find explicit entry for '%s', good match: %s" % (country_name_or_code, match)
return match
elif partial_match:
print "Could not find explicit entry for '%s', partial match: %s" % (country_name_or_code, partial_match)
return partial_match
else:
print "'%s' does not match any countries in our list." % country_name_or_code
return (None, None, None, None)
def ReadCountryData(filename='data/countries.csv'):
"""Read country data file, yielding rows of information."""
country_file = util.FindDataFile(filename)
for row in csv.DictReader(open(country_file), fieldnames=['name', 'code', 'coords']):
yield row
|
mollstam/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/lib2to3/tests/test_util.py
|
126
|
""" Test suite for the code in fixer_util """
# Testing imports
from . import support
# Python imports
import os.path
# Local imports
from lib2to3.pytree import Node, Leaf
from lib2to3 import fixer_util
from lib2to3.fixer_util import Attr, Name, Call, Comma
from lib2to3.pgen2 import token
def parse(code, strip_levels=0):
# The topmost node is file_input, which we don't care about.
    # The next-topmost node is a *_stmt node, which we also don't care about.
tree = support.parse_string(code)
for i in range(strip_levels):
tree = tree.children[0]
tree.parent = None
return tree
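# For example (illustrative): parse("(a, b)", strip_levels=2) peels off the
# file_input and simple_stmt wrappers, leaving the bare atom node for the
# tuple -- the shape the is_tuple/is_list helpers below expect.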
class MacroTestCase(support.TestCase):
def assertStr(self, node, string):
if isinstance(node, (tuple, list)):
node = Node(fixer_util.syms.simple_stmt, node)
self.assertEqual(str(node), string)
class Test_is_tuple(support.TestCase):
def is_tuple(self, string):
return fixer_util.is_tuple(parse(string, strip_levels=2))
def test_valid(self):
self.assertTrue(self.is_tuple("(a, b)"))
self.assertTrue(self.is_tuple("(a, (b, c))"))
self.assertTrue(self.is_tuple("((a, (b, c)),)"))
self.assertTrue(self.is_tuple("(a,)"))
self.assertTrue(self.is_tuple("()"))
def test_invalid(self):
self.assertFalse(self.is_tuple("(a)"))
self.assertFalse(self.is_tuple("('foo') % (b, c)"))
class Test_is_list(support.TestCase):
def is_list(self, string):
return fixer_util.is_list(parse(string, strip_levels=2))
def test_valid(self):
self.assertTrue(self.is_list("[]"))
self.assertTrue(self.is_list("[a]"))
self.assertTrue(self.is_list("[a, b]"))
self.assertTrue(self.is_list("[a, [b, c]]"))
self.assertTrue(self.is_list("[[a, [b, c]],]"))
def test_invalid(self):
self.assertFalse(self.is_list("[]+[]"))
class Test_Attr(MacroTestCase):
def test(self):
call = parse("foo()", strip_levels=2)
self.assertStr(Attr(Name("a"), Name("b")), "a.b")
self.assertStr(Attr(call, Name("b")), "foo().b")
def test_returns(self):
attr = Attr(Name("a"), Name("b"))
self.assertEqual(type(attr), list)
class Test_Name(MacroTestCase):
def test(self):
self.assertStr(Name("a"), "a")
self.assertStr(Name("foo.foo().bar"), "foo.foo().bar")
self.assertStr(Name("a", prefix="b"), "ba")
class Test_Call(MacroTestCase):
def _Call(self, name, args=None, prefix=None):
"""Help the next test"""
children = []
if isinstance(args, list):
for arg in args:
children.append(arg)
children.append(Comma())
children.pop()
return Call(Name(name), children, prefix)
def test(self):
kids = [None,
[Leaf(token.NUMBER, 1), Leaf(token.NUMBER, 2),
Leaf(token.NUMBER, 3)],
[Leaf(token.NUMBER, 1), Leaf(token.NUMBER, 3),
Leaf(token.NUMBER, 2), Leaf(token.NUMBER, 4)],
[Leaf(token.STRING, "b"), Leaf(token.STRING, "j", prefix=" ")]
]
self.assertStr(self._Call("A"), "A()")
self.assertStr(self._Call("b", kids[1]), "b(1,2,3)")
self.assertStr(self._Call("a.b().c", kids[2]), "a.b().c(1,3,2,4)")
self.assertStr(self._Call("d", kids[3], prefix=" "), " d(b, j)")
class Test_does_tree_import(support.TestCase):
def _find_bind_rec(self, name, node):
# Search a tree for a binding -- used to find the starting
# point for these tests.
c = fixer_util.find_binding(name, node)
if c: return c
for child in node.children:
c = self._find_bind_rec(name, child)
if c: return c
def does_tree_import(self, package, name, string):
node = parse(string)
# Find the binding of start -- that's what we'll go from
node = self._find_bind_rec('start', node)
return fixer_util.does_tree_import(package, name, node)
def try_with(self, string):
failing_tests = (("a", "a", "from a import b"),
("a.d", "a", "from a.d import b"),
("d.a", "a", "from d.a import b"),
(None, "a", "import b"),
(None, "a", "import b, c, d"))
for package, name, import_ in failing_tests:
n = self.does_tree_import(package, name, import_ + "\n" + string)
self.assertFalse(n)
n = self.does_tree_import(package, name, string + "\n" + import_)
self.assertFalse(n)
passing_tests = (("a", "a", "from a import a"),
("x", "a", "from x import a"),
("x", "a", "from x import b, c, a, d"),
("x.b", "a", "from x.b import a"),
("x.b", "a", "from x.b import b, c, a, d"),
(None, "a", "import a"),
(None, "a", "import b, c, a, d"))
for package, name, import_ in passing_tests:
n = self.does_tree_import(package, name, import_ + "\n" + string)
self.assertTrue(n)
n = self.does_tree_import(package, name, string + "\n" + import_)
self.assertTrue(n)
def test_in_function(self):
self.try_with("def foo():\n\tbar.baz()\n\tstart=3")
class Test_find_binding(support.TestCase):
def find_binding(self, name, string, package=None):
return fixer_util.find_binding(name, parse(string), package)
def test_simple_assignment(self):
self.assertTrue(self.find_binding("a", "a = b"))
self.assertTrue(self.find_binding("a", "a = [b, c, d]"))
self.assertTrue(self.find_binding("a", "a = foo()"))
self.assertTrue(self.find_binding("a", "a = foo().foo.foo[6][foo]"))
self.assertFalse(self.find_binding("a", "foo = a"))
self.assertFalse(self.find_binding("a", "foo = (a, b, c)"))
def test_tuple_assignment(self):
self.assertTrue(self.find_binding("a", "(a,) = b"))
self.assertTrue(self.find_binding("a", "(a, b, c) = [b, c, d]"))
self.assertTrue(self.find_binding("a", "(c, (d, a), b) = foo()"))
self.assertTrue(self.find_binding("a", "(a, b) = foo().foo[6][foo]"))
self.assertFalse(self.find_binding("a", "(foo, b) = (b, a)"))
self.assertFalse(self.find_binding("a", "(foo, (b, c)) = (a, b, c)"))
def test_list_assignment(self):
self.assertTrue(self.find_binding("a", "[a] = b"))
self.assertTrue(self.find_binding("a", "[a, b, c] = [b, c, d]"))
self.assertTrue(self.find_binding("a", "[c, [d, a], b] = foo()"))
self.assertTrue(self.find_binding("a", "[a, b] = foo().foo[a][foo]"))
self.assertFalse(self.find_binding("a", "[foo, b] = (b, a)"))
self.assertFalse(self.find_binding("a", "[foo, [b, c]] = (a, b, c)"))
def test_invalid_assignments(self):
self.assertFalse(self.find_binding("a", "foo.a = 5"))
self.assertFalse(self.find_binding("a", "foo[a] = 5"))
self.assertFalse(self.find_binding("a", "foo(a) = 5"))
self.assertFalse(self.find_binding("a", "foo(a, b) = 5"))
def test_simple_import(self):
self.assertTrue(self.find_binding("a", "import a"))
self.assertTrue(self.find_binding("a", "import b, c, a, d"))
self.assertFalse(self.find_binding("a", "import b"))
self.assertFalse(self.find_binding("a", "import b, c, d"))
def test_from_import(self):
self.assertTrue(self.find_binding("a", "from x import a"))
self.assertTrue(self.find_binding("a", "from a import a"))
self.assertTrue(self.find_binding("a", "from x import b, c, a, d"))
self.assertTrue(self.find_binding("a", "from x.b import a"))
self.assertTrue(self.find_binding("a", "from x.b import b, c, a, d"))
self.assertFalse(self.find_binding("a", "from a import b"))
self.assertFalse(self.find_binding("a", "from a.d import b"))
self.assertFalse(self.find_binding("a", "from d.a import b"))
def test_import_as(self):
self.assertTrue(self.find_binding("a", "import b as a"))
self.assertTrue(self.find_binding("a", "import b as a, c, a as f, d"))
self.assertFalse(self.find_binding("a", "import a as f"))
self.assertFalse(self.find_binding("a", "import b, c as f, d as e"))
def test_from_import_as(self):
self.assertTrue(self.find_binding("a", "from x import b as a"))
self.assertTrue(self.find_binding("a", "from x import g as a, d as b"))
self.assertTrue(self.find_binding("a", "from x.b import t as a"))
self.assertTrue(self.find_binding("a", "from x.b import g as a, d"))
self.assertFalse(self.find_binding("a", "from a import b as t"))
self.assertFalse(self.find_binding("a", "from a.d import b as t"))
self.assertFalse(self.find_binding("a", "from d.a import b as t"))
def test_simple_import_with_package(self):
self.assertTrue(self.find_binding("b", "import b"))
self.assertTrue(self.find_binding("b", "import b, c, d"))
self.assertFalse(self.find_binding("b", "import b", "b"))
self.assertFalse(self.find_binding("b", "import b, c, d", "c"))
def test_from_import_with_package(self):
self.assertTrue(self.find_binding("a", "from x import a", "x"))
self.assertTrue(self.find_binding("a", "from a import a", "a"))
self.assertTrue(self.find_binding("a", "from x import *", "x"))
self.assertTrue(self.find_binding("a", "from x import b, c, a, d", "x"))
self.assertTrue(self.find_binding("a", "from x.b import a", "x.b"))
self.assertTrue(self.find_binding("a", "from x.b import *", "x.b"))
self.assertTrue(self.find_binding("a", "from x.b import b, c, a, d", "x.b"))
self.assertFalse(self.find_binding("a", "from a import b", "a"))
self.assertFalse(self.find_binding("a", "from a.d import b", "a.d"))
self.assertFalse(self.find_binding("a", "from d.a import b", "a.d"))
self.assertFalse(self.find_binding("a", "from x.y import *", "a.b"))
def test_import_as_with_package(self):
self.assertFalse(self.find_binding("a", "import b.c as a", "b.c"))
self.assertFalse(self.find_binding("a", "import a as f", "f"))
self.assertFalse(self.find_binding("a", "import a as f", "a"))
def test_from_import_as_with_package(self):
# Because it would take a lot of special-case code in the fixers
# to deal with from foo import bar as baz, we'll simply always
# fail if there is an "from ... import ... as ..."
self.assertFalse(self.find_binding("a", "from x import b as a", "x"))
self.assertFalse(self.find_binding("a", "from x import g as a, d as b", "x"))
self.assertFalse(self.find_binding("a", "from x.b import t as a", "x.b"))
self.assertFalse(self.find_binding("a", "from x.b import g as a, d", "x.b"))
self.assertFalse(self.find_binding("a", "from a import b as t", "a"))
self.assertFalse(self.find_binding("a", "from a import b as t", "b"))
self.assertFalse(self.find_binding("a", "from a import b as t", "t"))
def test_function_def(self):
self.assertTrue(self.find_binding("a", "def a(): pass"))
self.assertTrue(self.find_binding("a", "def a(b, c, d): pass"))
self.assertTrue(self.find_binding("a", "def a(): b = 7"))
self.assertFalse(self.find_binding("a", "def d(b, (c, a), e): pass"))
self.assertFalse(self.find_binding("a", "def d(a=7): pass"))
self.assertFalse(self.find_binding("a", "def d(a): pass"))
self.assertFalse(self.find_binding("a", "def d(): a = 7"))
s = """
def d():
def a():
pass"""
self.assertFalse(self.find_binding("a", s))
def test_class_def(self):
self.assertTrue(self.find_binding("a", "class a: pass"))
self.assertTrue(self.find_binding("a", "class a(): pass"))
self.assertTrue(self.find_binding("a", "class a(b): pass"))
self.assertTrue(self.find_binding("a", "class a(b, c=8): pass"))
self.assertFalse(self.find_binding("a", "class d: pass"))
self.assertFalse(self.find_binding("a", "class d(a): pass"))
self.assertFalse(self.find_binding("a", "class d(b, a=7): pass"))
self.assertFalse(self.find_binding("a", "class d(b, *a): pass"))
self.assertFalse(self.find_binding("a", "class d(b, **a): pass"))
self.assertFalse(self.find_binding("a", "class d: a = 7"))
s = """
class d():
class a():
pass"""
self.assertFalse(self.find_binding("a", s))
def test_for(self):
self.assertTrue(self.find_binding("a", "for a in r: pass"))
self.assertTrue(self.find_binding("a", "for a, b in r: pass"))
self.assertTrue(self.find_binding("a", "for (a, b) in r: pass"))
self.assertTrue(self.find_binding("a", "for c, (a,) in r: pass"))
self.assertTrue(self.find_binding("a", "for c, (a, b) in r: pass"))
self.assertTrue(self.find_binding("a", "for c in r: a = c"))
self.assertFalse(self.find_binding("a", "for c in a: pass"))
def test_for_nested(self):
s = """
for b in r:
for a in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for a, c in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for (a, c) in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for (a,) in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for c, (a, d) in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for c in b:
a = 7"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for c in b:
d = a"""
self.assertFalse(self.find_binding("a", s))
s = """
for b in r:
for c in a:
d = 7"""
self.assertFalse(self.find_binding("a", s))
def test_if(self):
self.assertTrue(self.find_binding("a", "if b in r: a = c"))
self.assertFalse(self.find_binding("a", "if a in r: d = e"))
def test_if_nested(self):
s = """
if b in r:
if c in d:
a = c"""
self.assertTrue(self.find_binding("a", s))
s = """
if b in r:
if c in d:
c = a"""
self.assertFalse(self.find_binding("a", s))
def test_while(self):
self.assertTrue(self.find_binding("a", "while b in r: a = c"))
self.assertFalse(self.find_binding("a", "while a in r: d = e"))
def test_while_nested(self):
s = """
while b in r:
while c in d:
a = c"""
self.assertTrue(self.find_binding("a", s))
s = """
while b in r:
while c in d:
c = a"""
self.assertFalse(self.find_binding("a", s))
def test_try_except(self):
s = """
try:
a = 6
except:
b = 8"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except KeyError:
pass
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
b = 6"""
self.assertFalse(self.find_binding("a", s))
def test_try_except_nested(self):
s = """
try:
try:
a = 6
except:
pass
except:
b = 8"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
try:
a = 6
except:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
try:
pass
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
try:
b = 8
except KeyError:
pass
except:
a = 6
except:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
pass
except:
try:
b = 8
except KeyError:
pass
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
b = 6"""
self.assertFalse(self.find_binding("a", s))
s = """
try:
try:
b = 8
except:
c = d
except:
try:
b = 6
except:
t = 8
except:
o = y"""
self.assertFalse(self.find_binding("a", s))
def test_try_except_finally(self):
s = """
try:
c = 6
except:
b = 8
finally:
a = 9"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
b = 6"""
self.assertFalse(self.find_binding("a", s))
s = """
try:
b = 8
except:
b = 9
finally:
b = 6"""
self.assertFalse(self.find_binding("a", s))
def test_try_except_finally_nested(self):
s = """
try:
c = 6
except:
b = 8
finally:
try:
a = 9
except:
b = 9
finally:
c = 9"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
try:
pass
finally:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
try:
b = 6
finally:
b = 7"""
self.assertFalse(self.find_binding("a", s))
class Test_touch_import(support.TestCase):
def test_after_docstring(self):
node = parse('"""foo"""\nbar()')
fixer_util.touch_import(None, "foo", node)
self.assertEqual(str(node), '"""foo"""\nimport foo\nbar()\n\n')
def test_after_imports(self):
node = parse('"""foo"""\nimport bar\nbar()')
fixer_util.touch_import(None, "foo", node)
self.assertEqual(str(node), '"""foo"""\nimport bar\nimport foo\nbar()\n\n')
def test_beginning(self):
node = parse('bar()')
fixer_util.touch_import(None, "foo", node)
self.assertEqual(str(node), 'import foo\nbar()\n\n')
def test_from_import(self):
node = parse('bar()')
fixer_util.touch_import("html", "escape", node)
self.assertEqual(str(node), 'from html import escape\nbar()\n\n')
def test_name_import(self):
node = parse('bar()')
fixer_util.touch_import(None, "cgi", node)
self.assertEqual(str(node), 'import cgi\nbar()\n\n')
class Test_find_indentation(support.TestCase):
def test_nothing(self):
fi = fixer_util.find_indentation
node = parse("node()")
self.assertEqual(fi(node), u"")
node = parse("")
self.assertEqual(fi(node), u"")
def test_simple(self):
fi = fixer_util.find_indentation
node = parse("def f():\n x()")
self.assertEqual(fi(node), u"")
self.assertEqual(fi(node.children[0].children[4].children[2]), u" ")
node = parse("def f():\n x()\n y()")
self.assertEqual(fi(node.children[0].children[4].children[4]), u" ")
|
chris-x86-64/sansyuyu_bot
|
refs/heads/master
|
sanchan/__init__.py
|
12133432
| |
mbauskar/phrerp
|
refs/heads/develop
|
erpnext/accounts/report/sales_register/__init__.py
|
12133432
| |
nikolas/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/django/conf/locale/bn/__init__.py
|
12133432
| |
bendk/thesquirrel
|
refs/heads/master
|
editor/templatetags/__init__.py
|
12133432
| |
junhuac/MQUIC
|
refs/heads/master
|
src/tools/gyp/test/actions-multiple/src/filter.py
|
349
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
data = open(sys.argv[3], 'r').read()
fh = open(sys.argv[4], 'w')
fh.write(data.replace(sys.argv[1], sys.argv[2]))
fh.close()
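# Usage (illustrative): filter.py OLD NEW INPUT OUTPUT -- copies INPUT to
# OUTPUT with every occurrence of OLD replaced by NEW.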
|
suhailvs/django-quran
|
refs/heads/master
|
quran/data/__init__.py
|
1
|
import re
import unittest
from os import path
import os
from xml.dom.minidom import parse, parseString
from django.db import transaction
from quran.models import *
from quran.buckwalter import *
def path_to(fn):
return path.join(path.dirname(__file__), fn)
@transaction.atomic
def import_quran():
d = parse(path_to('tanzil/quran-data.xml'))
d2 = parse(path_to('tanzil/quran-uthmani.xml'))
suras = d.getElementsByTagName('sura')
for s in suras:
index = int(s.getAttribute('index'))
ayas = s.getAttribute('ayas')
start = int(s.getAttribute('start'))
name = s.getAttribute('name')
tname = s.getAttribute('tname')
ename = s.getAttribute('ename')
type = s.getAttribute('type')
order = int(s.getAttribute('order'))
rukus = int(s.getAttribute('rukus'))
sura_model = Sura(number=index, name=name, tname=tname, ename=ename, type=type, order=order, rukus=rukus)
sura = d2.getElementsByTagName('sura')[index - 1]
assert int(sura.getAttribute('index')) == sura_model.number
sura_model.save()
ayas = sura.getElementsByTagName('aya')
bismillah = ayas[0].getAttribute('bismillah')
for aya in ayas:
index = int(aya.getAttribute('index'))
text = aya.getAttribute('text')
aya_model = Aya(sura=sura_model, number=index, text=text)
aya_model.save()
print ("%d:%d" % (sura_model.number, index))
@transaction.atomic
def import_translation_txt(path, translation):
print ("Importing %s translation" % (translation.name))
f = open(path)
ayas = Aya.objects.all()
for aya in ayas:
line = f.readline()
if len(line) <= 1:
            raise Exception('Translation file [%s] ended prematurely on aya %d:%d' % (path, aya.sura_id, aya.number))
line = line.strip()
t = TranslatedAya(sura=aya.sura, aya=aya, translation=translation, text=line)
t.save()
print ("[%s] %d:%d" % (translation.name, aya.sura_id, aya.number))
def import_translations():
translator_data = open(path_to('zekr/translator_data.txt'))
for line in translator_data.readlines():
name,translator,source_name,source_url,filename = line.strip().split(';')
translation = QuranTranslation(name=name,translator=translator, source_name=source_name, source_url=source_url)
translation.save()
import_translation_txt(path_to('zekr/%s' % filename), translation)
def extract_lem(morphology):
p = re.compile('LEM:(?P<lem>[^ "]+)')
m = p.search(morphology)
r = None
if(m):
r = buckwalter_to_unicode(m.group('lem'))
return r
def extract_root(morphology):
p = re.compile('ROOT:(?P<root>[^ "]+)')
m = p.search(morphology)
r = None
root = None
if m:
r = buckwalter_to_unicode(m.group('root'))
if r:
try:
root = Root.objects.get(letters=r)
except Root.DoesNotExist:
root = Root(letters=r)
root.save()
return root
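# Note: the try/except above is a manual get-or-create; with Django's
# built-in helper it could equivalently be written as (illustrative):
#   root, _created = Root.objects.get_or_create(letters=r)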
def import_morphology_xml():
d = parse(path_to('corpus/quranic-corpus-morphology-0.2.xml'))
suras = d.getElementsByTagName('chapter')
for s in suras:
sura_number = int(s.getAttribute('number'))
sura = Sura.objects.get(number=sura_number)
ayas = s.getElementsByTagName('verse')
for a in ayas:
aya_number = int(a.getAttribute('number'))
aya = Aya.objects.get(sura=sura, number=aya_number)
words = a.getElementsByTagName('word')
for w in words:
number = int(w.getAttribute('number'))
token = w.getAttribute('token')
morphology = w.getAttribute('morphology')
lemma = None
dtoken = token
root = extract_root(morphology)
lem = extract_lem(morphology)
if lem: dtoken = lem
try:
lemma = Lemma.objects.get(token=dtoken)
except Lemma.DoesNotExist:
lemma = Lemma(token=dtoken, root=root)
lemma.save()
word = Word(sura=sura, aya=aya, number=number, token=token, root=root, lemma=lemma)
word.save()
print ("[morphology] %d:%d" % (sura.number, aya.number))
def import_morphology_txt():
sura = Sura.objects.get(number=2)
aya = Aya.objects.get(sura=sura, number=2) # any aya except the first.
f = open(path_to('corpus/quranic-corpus-morphology-0.2.txt'))
line = f.readline()
while len(line) > 0:
parts = line.strip().split('|')
sura_number = 0
try:
sura_number = int(parts[0])
except ValueError:
line = f.readline()
continue
aya_number = int(parts[1])
word_number = int(parts[2])
token = parts[3]
morphology = parts[4]
        if aya_number != aya.number:
            if sura_number != sura.number:
sura = Sura.objects.get(number=sura_number)
aya = Aya.objects.get(sura=sura, number=aya_number)
print ("[morphology] %d:%d" % (sura.number, aya.number))
lemma = None
dtoken = token
root = extract_root(morphology)
lem = extract_lem(morphology)
if lem: dtoken = lem
try:
lemma = Lemma.objects.get(token=dtoken)
except Lemma.DoesNotExist:
lemma = Lemma(token=dtoken, root=root)
lemma.save()
word = Word(sura=sura, aya=aya, number=word_number, token=token, root=root, lemma=lemma)
word.save()
line = f.readline()
def import_morphology():
return import_morphology_txt()
def import_word_translations():
f = open(path_to('corpus/word_by_word_meaning.txt'))
for line in f:
parts = line.strip().split('|')
sura_number = 0
try:
sura_number = int(parts[0])
except ValueError:
continue
aya_number = int(parts[1])
word_number = int(parts[2])
ename = parts[3]
translation = parts[4]
cur_word = Word.objects.get(sura__number=sura_number, aya__number=aya_number, number=word_number)
cur_word.ename = ename
cur_word.translation = translation
cur_word.save()
if word_number == 1: print ('%s:%s' %(parts[0],parts[1]))
def test_data(verbosity):
verbosity = int(verbosity)
print (verbosity)
test_suite = unittest.TestLoader().loadTestsFromTestCase(DataIntegrityTestCase)
unittest.TextTestRunner(verbosity=verbosity).run(test_suite)
class DataIntegrityTestCase(unittest.TestCase):
def check_word(self, sura_number, aya_number, word_number, expected_word):
sura = Sura.objects.get(number=sura_number)
aya = sura.ayas.get(number=aya_number)
word = aya.words.get(number=word_number)
self.assertEquals(word.token, buckwalter_to_unicode(expected_word))
def test_first_ayas(self):
"""
Test the first ayas of some suras
"""
self.check_word(1, 1, 3, u'{lr~aHoma`ni')
self.check_word(2, 1, 1, u'Al^m^')
self.check_word(114, 1, 1, u'qulo')
def test_last_ayas(self):
"""
Test the last ayas of some suras
"""
self.check_word(1, 7, 2, u'{l~a*iyna')
self.check_word(2, 286, 49, u'{loka`firiyna')
self.check_word(114, 6, 3, u'wa{ln~aAsi')
def test_yusuf_ali(self):
"""
Test some ayas against Yusuf Ali
"""
sura_number = 112
aya_number = 4
sura = Sura.objects.get(number=sura_number)
aya = sura.ayas.get(number=aya_number)
translation = QuranTranslation.objects.get(name='Yusuf Ali')
t = aya.translations.get(translation=translation)
self.assertEquals(t.text, 'And there is none like unto Him.')
|
juanalfonsopr/odoo
|
refs/heads/8.0
|
addons/pos_discount/discount.py
|
315
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class pos_config(osv.osv):
_inherit = 'pos.config'
_columns = {
'discount_pc': fields.float('Discount Percentage', help='The discount percentage'),
'discount_product_id': fields.many2one('product.product','Discount Product', help='The product used to model the discount'),
}
_defaults = {
'discount_pc': 10,
}
|
jcurbelo/networkx
|
refs/heads/master
|
networkx/algorithms/tests/test_distance_regular.py
|
87
|
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestDistanceRegular:
def test_is_distance_regular(self):
assert_true(nx.is_distance_regular(nx.icosahedral_graph()))
assert_true(nx.is_distance_regular(nx.petersen_graph()))
assert_true(nx.is_distance_regular(nx.cubical_graph()))
assert_true(nx.is_distance_regular(nx.complete_bipartite_graph(3,3)))
assert_true(nx.is_distance_regular(nx.tetrahedral_graph()))
assert_true(nx.is_distance_regular(nx.dodecahedral_graph()))
assert_true(nx.is_distance_regular(nx.pappus_graph()))
assert_true(nx.is_distance_regular(nx.heawood_graph()))
assert_true(nx.is_distance_regular(nx.cycle_graph(3)))
        # not distance regular
assert_false(nx.is_distance_regular(nx.path_graph(4)))
def test_not_connected(self):
G=nx.cycle_graph(4)
G.add_cycle([5,6,7])
assert_false(nx.is_distance_regular(G))
def test_global_parameters(self):
b,c=nx.intersection_array(nx.cycle_graph(5))
g=nx.global_parameters(b,c)
assert_equal(list(g),[(0, 0, 2), (1, 0, 1), (1, 1, 0)])
b,c=nx.intersection_array(nx.cycle_graph(3))
g=nx.global_parameters(b,c)
assert_equal(list(g),[(0, 0, 2), (1, 1, 0)])
def test_intersection_array(self):
b,c=nx.intersection_array(nx.cycle_graph(5))
assert_equal(b,[2, 1])
assert_equal(c,[1, 1])
b,c=nx.intersection_array(nx.dodecahedral_graph())
assert_equal(b,[3, 2, 1, 1, 1])
assert_equal(c,[1, 1, 1, 2, 3])
b,c=nx.intersection_array(nx.icosahedral_graph())
assert_equal(b,[5, 2, 1])
assert_equal(c,[1, 2, 5])
|
ChristianKniep/QNIB
|
refs/heads/master
|
serverfiles/usr/local/lib/networkx-1.6/build/lib/networkx/algorithms/tests/test_distance_regular.py
|
87
|
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestDistanceRegular:
def test_is_distance_regular(self):
assert_true(nx.is_distance_regular(nx.icosahedral_graph()))
assert_true(nx.is_distance_regular(nx.petersen_graph()))
assert_true(nx.is_distance_regular(nx.cubical_graph()))
assert_true(nx.is_distance_regular(nx.complete_bipartite_graph(3,3)))
assert_true(nx.is_distance_regular(nx.tetrahedral_graph()))
assert_true(nx.is_distance_regular(nx.dodecahedral_graph()))
assert_true(nx.is_distance_regular(nx.pappus_graph()))
assert_true(nx.is_distance_regular(nx.heawood_graph()))
assert_true(nx.is_distance_regular(nx.cycle_graph(3)))
        # not distance regular
assert_false(nx.is_distance_regular(nx.path_graph(4)))
def test_not_connected(self):
G=nx.cycle_graph(4)
G.add_cycle([5,6,7])
assert_false(nx.is_distance_regular(G))
def test_global_parameters(self):
b,c=nx.intersection_array(nx.cycle_graph(5))
g=nx.global_parameters(b,c)
assert_equal(list(g),[(0, 0, 2), (1, 0, 1), (1, 1, 0)])
b,c=nx.intersection_array(nx.cycle_graph(3))
g=nx.global_parameters(b,c)
assert_equal(list(g),[(0, 0, 2), (1, 1, 0)])
def test_intersection_array(self):
b,c=nx.intersection_array(nx.cycle_graph(5))
assert_equal(b,[2, 1])
assert_equal(c,[1, 1])
b,c=nx.intersection_array(nx.dodecahedral_graph())
assert_equal(b,[3, 2, 1, 1, 1])
assert_equal(c,[1, 1, 1, 2, 3])
b,c=nx.intersection_array(nx.icosahedral_graph())
assert_equal(b,[5, 2, 1])
assert_equal(c,[1, 2, 5])
|
lingdb/CoBL-public
|
refs/heads/master
|
ielex/lexicon/migrations/0039_languagebranches_to_sndcomp.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.db import migrations
def forwards_func(apps, schema_editor):
LanguageBranches = apps.get_model('lexicon', 'LanguageBranches')
SndComp = apps.get_model('lexicon', 'SndComp')
print('Creating SndComp from LanguageBranches.')
for lb in LanguageBranches.objects.all():
sndComp = SndComp(
lgSetName=lb.level1_branch_name,
lv0=lb.family_ix,
lv1=lb.level1_branch_ix,
lv2=lb.level2_branch_ix,
lv3=lb.level3_branch_ix,
cladeLevel0=lb.cladeLevel0,
cladeLevel1=lb.cladeLevel1,
cladeLevel2=lb.cladeLevel2,
cladeLevel3=lb.cladeLevel3)
sndComp.save()
print('Created %s SndComp entries.' % SndComp.objects.count())
def reverse_func(apps, schema_editor):
    # Delete all SndComp entries:
SndComp = apps.get_model('lexicon', 'SndComp')
print('Deleting all SndComp models.')
for s in SndComp.objects.all():
s.delete()
class Migration(migrations.Migration):
dependencies = [('lexicon', '0038_sndcomp')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
garbled1/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/list.py
|
53
|
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: list
author: Ansible core team
version_added: "2.0"
short_description: simply returns what it is given.
description:
    - this is mostly a noop, to be used as a with_list loop when you don't want the content transformed in any way.
"""
EXAMPLES = """
- name: unlike with_items you will get 3 items from this loop, the 2nd one being a list
debug: var=item
with_list:
- 1
- [2,3]
- 4
"""
RETURN = """
_list:
description: basically the same as you fed in
"""
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, **kwargs):
return terms
|
exofudge/Pocket-Casts
|
refs/heads/master
|
setup.py
|
1
|
import pocketcasts
from setuptools import setup, find_packages
setup(
name="pocketcasts-api",
version=pocketcasts.api.__version__,
description=pocketcasts.api.__doc__,
url=pocketcasts.api.__url__,
author=pocketcasts.api.__author__,
author_email='ferguslongley@live.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
],
packages=find_packages(exclude=['testing']),
keywords='podcasts pocketcasts',
install_requires=['requests'],
)
|
mejedi/tarantool
|
refs/heads/1.7
|
test/box-py/snapshot.test.py
|
2
|
import os
import sys
import yaml
import time
from signal import SIGUSR1
sys.stdout.push_filter(server.vardir, "<dir>")
admin("space = box.schema.space.create('tweedledum', { id = 0 })")
admin("index = space:create_index('primary', { type = 'hash' })")
print """#
# A test case for: http://bugs.launchpad.net/bugs/686411
# Check that 'box.snapshot()' does not overwrite a snapshot
# file that already exists. Verify also that any other
# error that happens when saving snapshot is propagated
# to the caller.
"""
admin("space:insert{1, 'first tuple'}")
admin("box.snapshot()")
# In absence of data modifications, two consecutive
# 'box.snapshot()' statements will try to write
# into the same file, since file name is based
# on LSN.
# Don't allow to overwrite snapshots.
admin("_, e = pcall(box.snapshot)")
admin("e.type")
admin("e.errno")
#
# Increment LSN
admin("space:insert{2, 'second tuple'}")
#
# Check for other errors, e.g. "Permission denied".
print "# Make 'var' directory read-only."
data_dir = os.path.join(server.vardir, server.name)
os.chmod(data_dir, 0555)
admin("_, e = pcall(box.snapshot)")
admin("e.type")
admin("e.errno")
# cleanup
os.chmod(data_dir, 0755)
admin("space:delete{1}")
admin("space:delete{2}")
print """#
# A test case for http://bugs.launchpad.net/bugs/727174
# "tarantool_box crashes when saving snapshot on SIGUSR1"
#"""
print """
# Increment the lsn number, to make sure there is no such snapshot yet
#"""
admin("space:insert{1, 'Test tuple'}")
pid = int(yaml.load(admin("box.info.pid", silent=True))[0])
lsn = int(yaml.load(admin("box.info.server.lsn", silent=True))[0])
snapshot = str(lsn).zfill(20) + ".snap"
snapshot = os.path.join(os.path.join(server.vardir, server.name), snapshot)
iteration = 0
MAX_ITERATIONS = 100
while not os.access(snapshot, os.F_OK) and iteration < MAX_ITERATIONS:
if iteration % 10 == 0:
os.kill(pid, SIGUSR1)
time.sleep(0.01)
iteration = iteration + 1
if iteration == 0 or iteration >= MAX_ITERATIONS:
print "Snapshot is missing."
else:
print "Snapshot exists."
admin("space:drop()")
sys.stdout.pop_filter()
|
Amechi101/concepteur-market-app
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/conf/locale/he/__init__.py
|
12133432
| |
tdickers/mitmproxy
|
refs/heads/master
|
test/mitmproxy/script/__init__.py
|
12133432
| |
blaze33/django
|
refs/heads/ticket_19456
|
django/template/response.py
|
221
|
from django.http import HttpResponse
from django.template import loader, Context, RequestContext
from django.utils import six
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
mimetype=None):
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
        # API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self._post_render_callbacks = []
        # The content argument doesn't make sense here because it will be
        # replaced with the rendered template, so we always pass an empty
        # string to prevent errors and keep the signature short.
super(SimpleTemplateResponse, self).__init__('', content_type, status,
mimetype)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""Pickling support function.
Ensures that the object can't be pickled before it has been
rendered, and that the pickled state only includes rendered
data, not the data used to construct the response.
"""
obj_dict = super(SimpleTemplateResponse, self).__getstate__()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"Accepts a template object, path-to-template or list of paths"
if isinstance(template, (list, tuple)):
return loader.select_template(template)
elif isinstance(template, six.string_types):
return loader.get_template(template)
else:
return template
def resolve_context(self, context):
"""Converts context data into a full Context object
(assuming it isn't already a Context object).
"""
if isinstance(context, Context):
return context
else:
return Context(context)
@property
def rendered_content(self):
"""Returns the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
content = template.render(context)
return content
def add_post_render_callback(self, callback):
"""Adds a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
def render(self):
"""Renders (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Returns the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be iterated over.')
return super(SimpleTemplateResponse, self).__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be accessed.')
return super(SimpleTemplateResponse, self).content
@content.setter
def content(self, value):
"""Sets the content for the response
"""
HttpResponse.content.fset(self, value)
self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + \
['_request', '_current_app']
def __init__(self, request, template, context=None, content_type=None,
status=None, mimetype=None, current_app=None):
# self.request gets over-written by django.test.client.Client - and
# unlike context_data and template_name the _request should not
# be considered part of the public API.
self._request = request
        # As a convenience we'll allow callers to provide current_app without
        # needing to create the RequestContext directly.
self._current_app = current_app
super(TemplateResponse, self).__init__(
template, context, content_type, status, mimetype)
def resolve_context(self, context):
"""Convert context data into a full RequestContext object
(assuming it isn't already a Context object).
"""
if isinstance(context, Context):
return context
return RequestContext(self._request, context, current_app=self._current_app)
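# Illustrative usage sketch (assumed view code, not part of this module;
# set_cache_headers is a hypothetical callback):
#   response = TemplateResponse(request, 'home.html', {'user': request.user})
#   response.add_post_render_callback(set_cache_headers)
#   return response  # the handler later calls render() to bake the content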
|
shirou/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/facts.py
|
6
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import stat
import array
import errno
import fcntl
import fnmatch
import glob
import platform
import re
import signal
import shlex
import socket
import struct
import datetime
import time
import getpass
import ConfigParser
import StringIO
from string import maketrans
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import json
except ImportError:
import simplejson as json
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
class TimeoutError(Exception):
pass
def timeout(seconds=10, error_message="Timer expired"):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
return decorator
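# Illustrative usage sketch (hypothetical values): a slow fact gatherer would
# be wrapped like
#   @timeout(seconds=120, error_message="mount facts timed out")
#   def get_mount_facts(self): ...
# so callers can catch the TimeoutError raised by the alarm handler (see the
# try/except around get_mount_facts in LinuxHardware.populate below).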
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
"""
_I386RE = re.compile(r'i[3456]86')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
OSDIST_DICT = { '/etc/redhat-release': 'RedHat',
'/etc/vmware-release': 'VMwareESX',
'/etc/openwrt_release': 'OpenWrt',
'/etc/system-release': 'OtherLinux',
'/etc/alpine-release': 'Alpine',
'/etc/release': 'Solaris',
'/etc/arch-release': 'Archlinux',
'/etc/SuSE-release': 'SuSE',
'/etc/gentoo-release': 'Gentoo',
'/etc/os-release': 'Debian' }
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg' },
]
def __init__(self):
self.facts = {}
self.get_platform_facts()
self.get_distribution_facts()
self.get_cmdline()
self.get_public_ssh_host_keys()
self.get_selinux_facts()
self.get_pkg_mgr_facts()
self.get_lsb_facts()
self.get_date_time_facts()
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(self):
self.facts['system'] = platform.system()
self.facts['kernel'] = platform.release()
self.facts['machine'] = platform.machine()
self.facts['python_version'] = platform.python_version()
self.facts['fqdn'] = socket.getfqdn()
self.facts['hostname'] = platform.node().split('.')[0]
self.facts['nodename'] = platform.node()
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
arch_bits = platform.architecture()[0]
self.facts['userspace_bits'] = arch_bits.replace('bit', '')
if self.facts['machine'] == 'x86_64':
self.facts['architecture'] = self.facts['machine']
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
elif Facts._I386RE.search(self.facts['machine']):
self.facts['architecture'] = 'i386'
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
else:
self.facts['architecture'] = self.facts['machine']
if self.facts['system'] == 'Linux':
self.get_distribution_facts()
elif self.facts['system'] == 'AIX':
rc, out, err = module.run_command("/usr/sbin/bootinfo -p")
data = out.split('\n')
self.facts['architecture'] = data[0]
def get_local_facts(self):
fact_path = module.params.get('fact_path', None)
if not fact_path or not os.path.exists(fact_path):
return
local = {}
for fn in sorted(glob.glob(fact_path + '/*.fact')):
# where it will sit under local facts
fact_base = os.path.basename(fn).replace('.fact','')
if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
# run it
# try to read it as json first
# if that fails read it with ConfigParser
# if that fails, skip it
rc, out, err = module.run_command(fn)
else:
out = open(fn).read()
# load raw json
fact = 'loading %s' % fact_base
try:
fact = json.loads(out)
except ValueError, e:
# load raw ini
cp = ConfigParser.ConfigParser()
try:
cp.readfp(StringIO.StringIO(out))
except ConfigParser.Error, e:
fact="error loading fact - please check content"
else:
fact = {}
#print cp.sections()
for sect in cp.sections():
if sect not in fact:
fact[sect] = {}
for opt in cp.options(sect):
val = cp.get(sect, opt)
fact[sect][opt]=val
local[fact_base] = fact
if not local:
return
self.facts['local'] = local
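    # Illustrative example (default path assumed): an /etc/ansible/facts.d/
    # example.fact file containing {"role": "web"} would surface to playbooks
    # as ansible_local.example.role == "web".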
# platform.dist() is deprecated in 2.6
# in 2.6 and newer, you should use platform.linux_distribution()
def get_distribution_facts(self):
# A list with OS Family members
OS_FAMILY = dict(
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', SLES = 'Suse',
SLED = 'Suse', OpenSuSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
Archlinux = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX'
)
if self.facts['system'] == 'AIX':
self.facts['distribution'] = 'AIX'
rc, out, err = module.run_command("/usr/bin/oslevel")
data = out.split('.')
self.facts['distribution_version'] = data[0]
self.facts['distribution_release'] = data[1]
elif self.facts['system'] == 'HP-UX':
self.facts['distribution'] = 'HP-UX'
rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
if data:
self.facts['distribution_version'] = data.groups()[0]
self.facts['distribution_release'] = data.groups()[1]
elif self.facts['system'] == 'Darwin':
self.facts['distribution'] = 'MacOSX'
rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion")
data = out.split()[-1]
self.facts['distribution_version'] = data
elif self.facts['system'] == 'FreeBSD':
self.facts['distribution'] = 'FreeBSD'
self.facts['distribution_release'] = platform.release()
self.facts['distribution_version'] = platform.version()
elif self.facts['system'] == 'OpenBSD':
self.facts['distribution'] = 'OpenBSD'
self.facts['distribution_release'] = platform.release()
rc, out, err = module.run_command("/sbin/sysctl -n kern.version")
match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
if match:
self.facts['distribution_version'] = match.groups()[0]
else:
self.facts['distribution_version'] = 'release'
else:
dist = platform.dist()
self.facts['distribution'] = dist[0].capitalize() or 'NA'
self.facts['distribution_version'] = dist[1] or 'NA'
self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
self.facts['distribution_release'] = dist[2] or 'NA'
# Try to handle the exceptions now ...
for (path, name) in Facts.OSDIST_DICT.items():
if os.path.exists(path) and os.path.getsize(path) > 0:
if self.facts['distribution'] == 'Fedora':
pass
elif name == 'RedHat':
data = get_file_content(path)
if 'Red Hat' in data:
self.facts['distribution'] = name
else:
self.facts['distribution'] = data.split()[0]
elif name == 'OtherLinux':
data = get_file_content(path)
if 'Amazon' in data:
self.facts['distribution'] = 'Amazon'
self.facts['distribution_version'] = data.split()[-1]
elif name == 'OpenWrt':
data = get_file_content(path)
if 'OpenWrt' in data:
self.facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
elif name == 'Alpine':
data = get_file_content(path)
self.facts['distribution'] = 'Alpine'
self.facts['distribution_version'] = data
elif name == 'Solaris':
data = get_file_content(path).split('\n')[0]
ora_prefix = ''
if 'Oracle Solaris' in data:
data = data.replace('Oracle ','')
ora_prefix = 'Oracle '
self.facts['distribution'] = data.split()[0]
self.facts['distribution_version'] = data.split()[1]
self.facts['distribution_release'] = ora_prefix + data
elif name == 'SuSE':
data = get_file_content(path).splitlines()
for line in data:
if '=' in line:
self.facts['distribution_release'] = line.split('=')[1].strip()
elif name == 'Debian':
data = get_file_content(path).split('\n')[0]
release = re.search("PRETTY_NAME.+ \(?([^ ]+?)\)?\"", data)
if release:
self.facts['distribution_release'] = release.groups()[0]
else:
self.facts['distribution'] = name
self.facts['os_family'] = self.facts['distribution']
if self.facts['distribution'] in OS_FAMILY:
self.facts['os_family'] = OS_FAMILY[self.facts['distribution']]
def get_cmdline(self):
data = get_file_content('/proc/cmdline')
if data:
self.facts['cmdline'] = {}
try:
for piece in shlex.split(data):
item = piece.split('=', 1)
if len(item) == 1:
self.facts['cmdline'][item[0]] = True
else:
self.facts['cmdline'][item[0]] = item[1]
except ValueError, e:
pass
def get_public_ssh_host_keys(self):
dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub'
rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub'
ecdsa_filename = '/etc/ssh/ssh_host_ecdsa_key.pub'
if self.facts['system'] == 'Darwin':
dsa_filename = '/etc/ssh_host_dsa_key.pub'
rsa_filename = '/etc/ssh_host_rsa_key.pub'
ecdsa_filename = '/etc/ssh_host_ecdsa_key.pub'
dsa = get_file_content(dsa_filename)
rsa = get_file_content(rsa_filename)
ecdsa = get_file_content(ecdsa_filename)
if dsa is None:
dsa = 'NA'
else:
self.facts['ssh_host_key_dsa_public'] = dsa.split()[1]
if rsa is None:
rsa = 'NA'
else:
self.facts['ssh_host_key_rsa_public'] = rsa.split()[1]
if ecdsa is None:
ecdsa = 'NA'
else:
self.facts['ssh_host_key_ecdsa_public'] = ecdsa.split()[1]
def get_pkg_mgr_facts(self):
self.facts['pkg_mgr'] = 'unknown'
for pkg in Facts.PKG_MGRS:
if os.path.exists(pkg['path']):
self.facts['pkg_mgr'] = pkg['name']
if self.facts['system'] == 'OpenBSD':
self.facts['pkg_mgr'] = 'openbsd_pkg'
def get_lsb_facts(self):
lsb_path = module.get_bin_path('lsb_release')
if lsb_path:
rc, out, err = module.run_command([lsb_path, "-a"])
if rc == 0:
self.facts['lsb'] = {}
for line in out.split('\n'):
if len(line) < 1:
continue
value = line.split(':', 1)[1].strip()
if 'LSB Version:' in line:
self.facts['lsb']['release'] = value
elif 'Distributor ID:' in line:
self.facts['lsb']['id'] = value
elif 'Description:' in line:
self.facts['lsb']['description'] = value
elif 'Release:' in line:
self.facts['lsb']['release'] = value
elif 'Codename:' in line:
self.facts['lsb']['codename'] = value
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
elif lsb_path is None and os.path.exists('/etc/lsb-release'):
self.facts['lsb'] = {}
f = open('/etc/lsb-release', 'r')
try:
for line in f.readlines():
value = line.split('=',1)[1].strip()
if 'DISTRIB_ID' in line:
self.facts['lsb']['id'] = value
elif 'DISTRIB_RELEASE' in line:
self.facts['lsb']['release'] = value
elif 'DISTRIB_DESCRIPTION' in line:
self.facts['lsb']['description'] = value
elif 'DISTRIB_CODENAME' in line:
self.facts['lsb']['codename'] = value
finally:
f.close()
else:
return self.facts
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
def get_selinux_facts(self):
if not HAVE_SELINUX:
self.facts['selinux'] = False
return
self.facts['selinux'] = {}
if not selinux.is_selinux_enabled():
self.facts['selinux']['status'] = 'disabled'
else:
self.facts['selinux']['status'] = 'enabled'
try:
self.facts['selinux']['policyvers'] = selinux.security_policyvers()
except OSError, e:
self.facts['selinux']['policyvers'] = 'unknown'
try:
(rc, configmode) = selinux.selinux_getenforcemode()
if rc == 0:
self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
else:
self.facts['selinux']['config_mode'] = 'unknown'
except OSError, e:
self.facts['selinux']['config_mode'] = 'unknown'
try:
mode = selinux.security_getenforce()
self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
except OSError, e:
self.facts['selinux']['mode'] = 'unknown'
try:
(rc, policytype) = selinux.selinux_getpolicytype()
if rc == 0:
self.facts['selinux']['type'] = policytype
else:
self.facts['selinux']['type'] = 'unknown'
except OSError, e:
self.facts['selinux']['type'] = 'unknown'
def get_date_time_facts(self):
self.facts['date_time'] = {}
now = datetime.datetime.now()
self.facts['date_time']['year'] = now.strftime('%Y')
self.facts['date_time']['month'] = now.strftime('%m')
self.facts['date_time']['weekday'] = now.strftime('%A')
self.facts['date_time']['day'] = now.strftime('%d')
self.facts['date_time']['hour'] = now.strftime('%H')
self.facts['date_time']['minute'] = now.strftime('%M')
self.facts['date_time']['second'] = now.strftime('%S')
self.facts['date_time']['epoch'] = now.strftime('%s')
if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
self.facts['date_time']['epoch'] = str(int(time.time()))
self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
self.facts['date_time']['tz'] = time.strftime("%Z")
self.facts['date_time']['tz_offset'] = time.strftime("%z")
# User
def get_user_facts(self):
self.facts['user_id'] = getpass.getuser()
def get_env_facts(self):
self.facts['env'] = {}
for k,v in os.environ.iteritems():
self.facts['env'][k] = v
class Hardware(Facts):
"""
This is a generic Hardware subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this, it
should define:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
All subclasses MUST define platform.
"""
platform = 'Generic'
def __new__(cls, *arguments, **keyword):
subclass = cls
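        # Walk the Hardware subclasses and pick the one whose 'platform'
        # matches platform.system(); e.g. on Linux this selects LinuxHardware.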
for sc in Hardware.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self):
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxHardware(Hardware):
"""
Linux-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
In addition, it also defines number of DMI facts and device facts.
"""
platform = 'Linux'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
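        # /proc/meminfo lines look like 'MemTotal:        2052920 kB';
        # the kB value is converted to MB below.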
for line in open("/proc/meminfo").readlines():
data = line.split(":", 1)
key = data[0]
if key in LinuxHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
def get_cpu_facts(self):
i = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return
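        # Relevant /proc/cpuinfo lines look like (x86 example):
        #   model name  : Intel(R) Xeon(R) CPU E5-2680 0 @ 2.70GHz
        #   physical id : 0
        #   core id     : 1
        #   cpu cores   : 8
        #   siblings    : 16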
self.facts['processor'] = []
for line in open("/proc/cpuinfo").readlines():
data = line.split(":", 1)
key = data[0].strip()
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor' or key == 'vendor_id':
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = data[1].strip()
                if coreid not in cores:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
elif key == 'siblings':
cores[coreid] = int(data[1].strip())
elif key == '# processors':
self.facts['processor_cores'] = int(data[1].strip())
if self.facts['architecture'] != 's390x':
self.facts['processor_count'] = sockets and len(sockets) or i
self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1
self.facts['processor_threads_per_core'] = ((cores.values() and
cores.values()[0] or 1) / self.facts['processor_cores'])
self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
self.facts['processor_count'] * self.facts['processor_cores'])
def get_dmi_facts(self):
''' learn dmi facts from system
Try /sys first for dmi related facts.
If that is not available, fall back to dmidecode executable '''
if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
# Use kernel DMI info, if available
# DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA", "Blade" ]
DMI_DICT = {
'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
}
for (key,path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
try:
self.facts['form_factor'] = FORM_FACTOR[int(data)]
except IndexError, e:
self.facts['form_factor'] = 'unknown (%s)' % data
else:
self.facts[key] = data
else:
self.facts[key] = 'NA'
else:
# Fall back to using dmidecode, if available
dmi_bin = module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_version': 'bios-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer'
}
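        # `dmidecode -s <keyword>` prints a single value, e.g.
        #   dmidecode -s system-product-name  ->  'VMware Virtual Platform'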
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(thisvalue)
except UnicodeDecodeError:
thisvalue = "NA"
self.facts[k] = thisvalue
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
mtab = get_file_content('/etc/mtab', '')
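        # /etc/mtab lines look like:
        #   /dev/sda1 / ext4 rw,errors=remount-ro 0 0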
for line in mtab.split('\n'):
if line.startswith('/'):
fields = line.rstrip('\n').split()
                if fields[2] != 'none':
size_total = None
size_available = None
try:
statvfs_result = os.statvfs(fields[1])
size_total = statvfs_result.f_bsize * statvfs_result.f_blocks
size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail)
except OSError, e:
continue
self.facts['mounts'].append(
{'mount': fields[1],
'device':fields[0],
'fstype': fields[2],
'options': fields[3],
# statvfs data
'size_total': size_total,
'size_available': size_available,
})
def get_device_facts(self):
self.facts['devices'] = {}
lspci = module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = module.run_command([lspci, '-D'])
else:
pcidata = None
try:
block_devs = os.listdir("/sys/block")
except OSError:
return
for block in block_devs:
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError, e:
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
else:
continue
if "virtual" in path:
continue
sysdir = os.path.join("/sys/block", path)
if sysfs_no_links == 1:
for folder in os.listdir(sysdir):
if "device" in folder:
virtual = 0
break
if virtual:
continue
d = {}
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model']:
d[key] = get_file_content(sysdir + "/device/" + key)
for key,test in [ ('removable','/removable'), \
('support_discard','/queue/discard_granularity'),
]:
d[key] = get_file_content(sysdir + test)
d['partitions'] = {}
for folder in os.listdir(sysdir):
m = re.search("(" + diskname + "\d+)", folder)
if m:
part = {}
partname = m.group(1)
part_sysdir = sysdir + "/" + partname
part['start'] = get_file_content(part_sysdir + "/start",0)
part['sectors'] = get_file_content(part_sysdir + "/size",0)
part['sectorsize'] = get_file_content(part_sysdir + "/queue/physical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
d['partitions'][partname] = part
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
d['scheduler_mode'] = ""
scheduler = get_file_content(sysdir + "/queue/scheduler")
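            # the scheduler file reads like 'noop deadline [cfq]',
            # with the active scheduler in brackets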
if scheduler is not None:
m = re.match(".*?(\[(.*)\])", scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/physical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
d['host'] = ""
# domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
if m and pcidata:
pciid = m.group(1)
did = re.escape(pciid)
m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
d['host'] = m.group(1)
d['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
d['holders'].append(name)
else:
d['holders'].append(folder)
self.facts['devices'][diskname] = d
class SunOSHardware(Hardware):
"""
In addition to the generic memory and cpu facts, this also sets
swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
"""
platform = 'SunOS'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
return self.facts
def get_cpu_facts(self):
physid = 0
sockets = {}
rc, out, err = module.run_command("/usr/bin/kstat cpu_info")
self.facts['processor'] = []
for line in out.split('\n'):
if len(line) < 1:
continue
data = line.split(None, 1)
key = data[0].strip()
# "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
if key == 'module:':
brand = ''
elif key == 'brand':
brand = data[1].strip()
elif key == 'clock_MHz':
clock_mhz = data[1].strip()
elif key == 'implementation':
processor = brand or data[1].strip()
# Add clock speed to description for SPARC CPU
if self.facts['machine'] != 'i86pc':
processor += " @ " + clock_mhz + "MHz"
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(processor)
elif key == 'chip_id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
else:
sockets[physid] += 1
# Counting cores on Solaris can be complicated.
# https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
# Treat 'processor_count' as physical sockets and 'processor_cores' as
        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC, as
# these processors have: sockets -> cores -> threads/virtual CPU.
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_cores'] = 'NA'
self.facts['processor_count'] = len(self.facts['processor'])
def get_memory_facts(self):
rc, out, err = module.run_command(["/usr/sbin/prtconf"])
for line in out.split('\n'):
if 'Memory size' in line:
self.facts['memtotal_mb'] = line.split()[2]
rc, out, err = module.run_command("/usr/sbin/swap -s")
allocated = long(out.split()[1][:-1])
reserved = long(out.split()[5][:-1])
used = long(out.split()[8][:-1])
free = long(out.split()[10][:-1])
self.facts['swapfree_mb'] = free / 1024
self.facts['swaptotal_mb'] = (free + used) / 1024
self.facts['swap_allocated_mb'] = allocated / 1024
self.facts['swap_reserved_mb'] = reserved / 1024
class OpenBSDHardware(Hardware):
"""
OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- processor_speed
- devices
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.sysctl = self.get_sysctl()
self.get_memory_facts()
self.get_processor_facts()
self.get_device_facts()
return self.facts
def get_sysctl(self):
rc, out, err = module.run_command(["/sbin/sysctl", "hw"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
(key, value) = line.split('=')
sysctl[key] = value.strip()
return sysctl
def get_memory_facts(self):
# Get free memory. vmstat output looks like:
# procs memory page disks traps cpu
# r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
# 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
rc, out, err = module.run_command("/usr/bin/vmstat")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024
self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024
# Get swapctl info. swapctl output looks like:
# total: 69268 1K-blocks allocated, 0 used, 69268 available
# And for older OpenBSD:
# total: 69268k bytes allocated = 0k used, 69268k available
rc, out, err = module.run_command("/sbin/swapctl -sk")
if rc == 0:
swaptrans = maketrans(' ', ' ')
data = out.split()
self.facts['swapfree_mb'] = long(data[-2].translate(swaptrans, "kmg")) / 1024
self.facts['swaptotal_mb'] = long(data[1].translate(swaptrans, "kmg")) / 1024
def get_processor_facts(self):
processor = []
dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
i = 0
for line in dmesg_boot.splitlines():
if line.split(' ', 1)[0] == 'cpu%i:' % i:
processor.append(line.split(' ', 1)[1])
i = i + 1
processor_count = i
self.facts['processor'] = processor
self.facts['processor_count'] = processor_count
# I found no way to figure out the number of Cores per CPU in OpenBSD
self.facts['processor_cores'] = 'NA'
def get_device_facts(self):
devices = []
devices.extend(self.sysctl['hw.disknames'].split(','))
self.facts['devices'] = devices
class FreeBSDHardware(Hardware):
"""
FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'FreeBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu")
self.facts['processor_count'] = out.strip()
dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
for line in dmesg_boot.split('\n'):
if 'CPU:' in line:
cpu = re.sub(r'CPU:\s+', r"", line)
self.facts['processor'].append(cpu.strip())
if 'Logical CPUs per core' in line:
self.facts['processor_cores'] = line.split()[4]
def get_memory_facts(self):
rc, out, err = module.run_command("/sbin/sysctl vm.stats")
for line in out.split('\n'):
data = line.split()
if 'vm.stats.vm.v_page_size' in line:
pagesize = long(data[1])
if 'vm.stats.vm.v_page_count' in line:
pagecount = long(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = long(data[1])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = module.run_command("/usr/sbin/swapinfo -m")
lines = out.split('\n')
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
self.facts['swaptotal_mb'] = data[1]
self.facts['swapfree_mb'] = data[3]
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
def get_device_facts(self):
sysdir = '/dev'
self.facts['devices'] = {}
drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks")
slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
if d:
self.facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
self.facts['devices'][d.group(1)].append(s.group(1))
def get_dmi_facts(self):
''' learn dmi facts from system
Use dmidecode executable if available'''
# Fall back to using dmidecode, if available
dmi_bin = module.get_bin_path('dmidecode')
DMI_DICT = dict(
bios_date='bios-release-date',
bios_version='bios-version',
form_factor='chassis-type',
product_name='system-product-name',
product_serial='system-serial-number',
product_uuid='system-uuid',
product_version='system-version',
system_vendor='system-manufacturer'
)
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(self.facts[k])
except UnicodeDecodeError:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
class NetBSDHardware(Hardware):
"""
NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'NetBSD'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
i = 0
physid = 0
sockets = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in open("/proc/cpuinfo").readlines():
data = line.split(":", 1)
key = data[0].strip()
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor':
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_count'] = i
self.facts['processor_cores'] = 'NA'
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
for line in open("/proc/meminfo").readlines():
data = line.split(":", 1)
key = data[0]
if key in NetBSDHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
class AIX(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'AIX'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor")
if out:
i = 0
for line in out.split('\n'):
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
self.facts['processor_count'] = int(i)
rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
data = out.split(' ')
self.facts['processor'] = data[1]
rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
data = out.split(' ')
self.facts['processor_cores'] = int(data[1])
def get_memory_facts(self):
pagesize = 4096
rc, out, err = module.run_command("/usr/bin/vmstat -v")
for line in out.split('\n'):
data = line.split()
if 'memory pages' in line:
pagecount = long(data[0])
if 'free pages' in line:
freecount = long(data[0])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
        # Get swap info. `lsps -s` output looks like:
        #   Total Paging Space   Percent Used
        #         512MB               1%
        #
rc, out, err = module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.split('\n')
data = lines[1].split()
swaptotal_mb = long(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
self.facts['swaptotal_mb'] = swaptotal_mb
self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100)
def get_dmi_facts(self):
rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
self.facts['firmware_version'] = data[1].strip('IBM,')
class HPUX(Hardware):
"""
    HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor
- processor_cores
- processor_count
- model
- firmware
"""
platform = 'HP-UX'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_hw_facts()
return self.facts
def get_cpu_facts(self):
if self.facts['architecture'] == '9000/800':
rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip())
#Working with machinfo mess
elif self.facts['architecture'] == 'ia64':
if self.facts['distribution_version'] == "B.11.23":
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split('=')[1])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip())
if self.facts['distribution_version'] == "B.11.31":
                #machinfo only reports 'core' strings on B.11.31 releases > 1204; older builds take the fallback below
                rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
                if out.strip() == '0':
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
#If hyperthreading is active divide cores by 2
rc, out, err = module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
data = re.sub(' +',' ',out).strip().split(' ')
if len(data) == 1:
hyperthreading = 'OFF'
else:
hyperthreading = data[1]
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
data = out.strip().split(" ")
if hyperthreading == 'ON':
self.facts['processor_cores'] = int(data[0])/2
else:
if len(data) == 1:
self.facts['processor_cores'] = self.facts['processor_count']
else:
self.facts['processor_cores'] = int(data[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
else:
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip().split(" ")[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
def get_memory_facts(self):
pagesize = 4096
rc, out, err = module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
data = int(re.sub(' +',' ',out).split(' ')[5].strip())
self.facts['memfree_mb'] = pagesize * data / 1024 / 1024
if self.facts['architecture'] == '9000/800':
try:
rc, out, err = module.run_command("grep Physical /var/adm/syslog/syslog.log")
data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data) / 1024
except AttributeError:
#For systems where memory details aren't sent to syslog or the log has rotated, use parsed
                #adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
if os.access("/dev/kmem", os.R_OK):
rc, out, err = module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
if not err:
data = out
self.facts['memtotal_mb'] = int(data) / 256
else:
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data)
rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q")
self.facts['swaptotal_mb'] = int(out.strip())
rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
swap = 0
for line in out.strip().split('\n'):
swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
self.facts['swapfree_mb'] = swap
def get_hw_facts(self):
rc, out, err = module.run_command("model")
self.facts['model'] = out.strip()
if self.facts['architecture'] == 'ia64':
separator = ':'
if self.facts['distribution_version'] == "B.11.23":
separator = '='
rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
self.facts['firmware_version'] = out.split(separator)[1].strip()
class Darwin(Hardware):
"""
Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- processor
- processor_cores
- memtotal_mb
- memfree_mb
- model
- osversion
- osrevision
"""
platform = 'Darwin'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.sysctl = self.get_sysctl()
self.get_mac_facts()
self.get_cpu_facts()
self.get_memory_facts()
return self.facts
def get_sysctl(self):
rc, out, err = module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
if line.rstrip("\n"):
(key, value) = re.split(' = |: ', line, maxsplit=1)
sysctl[key] = value.strip()
return sysctl
def get_system_profile(self):
rc, out, err = module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
if rc != 0:
return dict()
system_profile = dict()
for line in out.splitlines():
if ': ' in line:
(key, value) = line.split(': ', 1)
system_profile[key.strip()] = ' '.join(value.strip().split())
return system_profile
def get_mac_facts(self):
rc, out, err = module.run_command("sysctl hw.model")
if rc == 0:
self.facts['model'] = out.splitlines()[-1].split()[1]
self.facts['osversion'] = self.sysctl['kern.osversion']
self.facts['osrevision'] = self.sysctl['kern.osrevision']
def get_cpu_facts(self):
if 'machdep.cpu.brand_string' in self.sysctl: # Intel
self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
else: # PowerPC
system_profile = self.get_system_profile()
self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
def get_memory_facts(self):
self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024
rc, out, err = module.run_command("sysctl hw.usermem")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024
class Network(Facts):
"""
This is a generic Network subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you must define:
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
All subclasses MUST define platform.
"""
platform = 'Generic'
IPV6_SCOPE = { '0' : 'global',
'10' : 'host',
'20' : 'link',
'40' : 'admin',
'50' : 'site',
'80' : 'organization' }
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Network.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self, module):
self.module = module
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxNetwork(Network):
"""
This is a Linux-specific subclass of Network. It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- ipv4_address and ipv6_address: the first non-local address for each family.
"""
platform = 'Linux'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
ip_path = self.module.get_bin_path('ip')
if ip_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, ip_path):
# Use the commands:
# ip -4 route get 8.8.8.8 -> Google public DNS
# ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and self.facts['os_family'] == 'RedHat' \
and self.facts['distribution_version'].startswith('4.'):
continue
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
words = out.split('\n')[0].split()
# A valid output starts with the queried address on the first line
if len(words) > 0 and words[0] == command[v][-1]:
for i in range(len(words) - 1):
if words[i] == 'dev':
interface[v]['interface'] = words[i+1]
elif words[i] == 'src':
interface[v]['address'] = words[i+1]
elif words[i] == 'via' and words[i+1] != command[v][-1]:
interface[v]['gateway'] = words[i+1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
interfaces = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
for path in glob.glob('/sys/class/net/*'):
if not os.path.isdir(path):
continue
device = os.path.basename(path)
interfaces[device] = { 'device': device }
if os.path.exists(os.path.join(path, 'address')):
macaddress = open(os.path.join(path, 'address')).read().strip()
if macaddress and macaddress != '00:00:00:00:00:00':
interfaces[device]['macaddress'] = macaddress
if os.path.exists(os.path.join(path, 'mtu')):
interfaces[device]['mtu'] = int(open(os.path.join(path, 'mtu')).read().strip())
if os.path.exists(os.path.join(path, 'operstate')):
interfaces[device]['active'] = open(os.path.join(path, 'operstate')).read().strip() != 'down'
# if os.path.exists(os.path.join(path, 'carrier')):
# interfaces[device]['link'] = open(os.path.join(path, 'carrier')).read().strip() == '1'
if os.path.exists(os.path.join(path, 'device','driver', 'module')):
interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
if os.path.exists(os.path.join(path, 'type')):
type = open(os.path.join(path, 'type')).read().strip()
if type == '1':
interfaces[device]['type'] = 'ether'
elif type == '512':
interfaces[device]['type'] = 'ppp'
elif type == '772':
interfaces[device]['type'] = 'loopback'
if os.path.exists(os.path.join(path, 'bridge')):
interfaces[device]['type'] = 'bridge'
interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
interfaces[device]['id'] = open(os.path.join(path, 'bridge', 'bridge_id')).read().strip()
if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
interfaces[device]['stp'] = open(os.path.join(path, 'bridge', 'stp_state')).read().strip() == '1'
if os.path.exists(os.path.join(path, 'bonding')):
interfaces[device]['type'] = 'bonding'
interfaces[device]['slaves'] = open(os.path.join(path, 'bonding', 'slaves')).read().split()
interfaces[device]['mode'] = open(os.path.join(path, 'bonding', 'mode')).read().split()[0]
interfaces[device]['miimon'] = open(os.path.join(path, 'bonding', 'miimon')).read().split()[0]
interfaces[device]['lacp_rate'] = open(os.path.join(path, 'bonding', 'lacp_rate')).read().split()[0]
                primary = open(os.path.join(path, 'bonding', 'primary')).read().strip()
if primary:
interfaces[device]['primary'] = primary
                # use a separate name so 'path' still points at the device dir for the flags check below
                all_slaves_path = os.path.join(path, 'bonding', 'all_slaves_active')
                if os.path.exists(all_slaves_path):
                    interfaces[device]['all_slaves_active'] = open(all_slaves_path).read().strip() == '1'
# Check whether an interface is in promiscuous mode
if os.path.exists(os.path.join(path,'flags')):
promisc_mode = False
                # Bit 0x100 (IFF_PROMISC) of the flags value indicates promiscuous mode:
                # 1 = promisc
                # 0 = no promisc
data = int(open(os.path.join(path, 'flags')).read().strip(),16)
promisc_mode = (data & 0x0100 > 0)
interfaces[device]['promisc'] = promisc_mode
def parse_ip_output(output, secondary=False):
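            # Address lines from `ip addr show` look like:
            #   inet 192.168.1.10/24 brd 192.168.1.255 scope global eth0
            #   inet6 fe80::a00:27ff:fe4e:66a1/64 scope link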
for line in output.split('\n'):
if not line:
continue
words = line.split()
if words[0] == 'inet':
if '/' in words[1]:
address, netmask_length = words[1].split('/')
else:
# pointopoint interfaces do not have a prefix
address = words[1]
netmask_length = "32"
address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
iface = words[-1]
if iface != device:
interfaces[iface] = {}
if not secondary and "ipv4" not in interfaces[iface]:
interfaces[iface]['ipv4'] = {'address': address,
'netmask': netmask,
'network': network}
else:
if "ipv4_secondaries" not in interfaces[iface]:
interfaces[iface]["ipv4_secondaries"] = []
interfaces[iface]["ipv4_secondaries"].append({
'address': address,
'netmask': netmask,
'network': network,
})
# add this secondary IP to the main device
if secondary:
if "ipv4_secondaries" not in interfaces[device]:
interfaces[device]["ipv4_secondaries"] = []
interfaces[device]["ipv4_secondaries"].append({
'address': address,
'netmask': netmask,
'network': network,
})
# If this is the default address, update default_ipv4
if 'address' in default_ipv4 and default_ipv4['address'] == address:
default_ipv4['netmask'] = netmask
default_ipv4['network'] = network
default_ipv4['macaddress'] = macaddress
default_ipv4['mtu'] = interfaces[device]['mtu']
default_ipv4['type'] = interfaces[device].get("type", "unknown")
default_ipv4['alias'] = words[-1]
if not address.startswith('127.'):
ips['all_ipv4_addresses'].append(address)
elif words[0] == 'inet6':
address, prefix = words[1].split('/')
scope = words[3]
if 'ipv6' not in interfaces[device]:
interfaces[device]['ipv6'] = []
interfaces[device]['ipv6'].append({
'address' : address,
'prefix' : prefix,
'scope' : scope
})
# If this is the default address, update default_ipv6
if 'address' in default_ipv6 and default_ipv6['address'] == address:
default_ipv6['prefix'] = prefix
default_ipv6['scope'] = scope
default_ipv6['macaddress'] = macaddress
default_ipv6['mtu'] = interfaces[device]['mtu']
default_ipv6['type'] = interfaces[device].get("type", "unknown")
if not address == '::1':
ips['all_ipv6_addresses'].append(address)
ip_path = module.get_bin_path("ip")
args = [ip_path, 'addr', 'show', 'primary', device]
rc, stdout, stderr = self.module.run_command(args)
primary_data = stdout
args = [ip_path, 'addr', 'show', 'secondary', device]
rc, stdout, stderr = self.module.run_command(args)
secondary_data = stdout
parse_ip_output(primary_data)
parse_ip_output(secondary_data, secondary=True)
        # replace ':' with '_' in interface names, since colons are hard to use in templates
new_interfaces = {}
for i in interfaces:
if ':' in i:
new_interfaces[i.replace(':','_')] = interfaces[i]
else:
new_interfaces[i] = interfaces[i]
return new_interfaces, ips
class GenericBsdIfconfigNetwork(Network):
"""
This is a generic BSD subclass of Network using the ifconfig command.
It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
It currently does not define
- default_ipv4 and default_ipv6
- type, mtu and network on interfaces
"""
platform = 'Generic_BSD_Ifconfig'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
ifconfig_path = module.get_bin_path('ifconfig')
if ifconfig_path is None:
return self.facts
route_path = module.get_bin_path('route')
if route_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
interfaces, ips = self.get_interfaces_info(ifconfig_path)
self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, route_path):
# Use the commands:
# route -n get 8.8.8.8 -> Google public DNS
# route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [route_path, '-n', 'get', '8.8.8.8'],
v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
lines = out.split('\n')
for line in lines:
words = line.split()
# Collect output from route command
if len(words) > 1:
if words[0] == 'interface:':
interface[v]['interface'] = words[1]
if words[0] == 'gateway:':
interface[v]['gateway'] = words[1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
# FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
# when running the command 'ifconfig'.
# Solaris must explicitly run the command 'ifconfig -a'.
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
if words[0] == 'pass':
continue
elif re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
        if len(words) >= 5: # Newer FreeBSD versions
current_if['metric'] = words[3]
current_if['mtu'] = words[5]
else:
current_if['mtu'] = words[3]
return current_if
def parse_options_line(self, words, current_if, ips):
# Mac has options like this...
current_if['options'] = self.get_options(words[0])
def parse_nd6_line(self, words, current_if, ips):
        # FreeBSD has options like this...
current_if['options'] = self.get_options(words[1])
def parse_ether_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_select'] = words[2]
if len(words) > 3:
current_if['media_type'] = words[3][1:]
if len(words) > 4:
current_if['media_options'] = self.get_options(words[4])
def parse_status_line(self, words, current_if, ips):
current_if['status'] = words[1]
def parse_lladdr_line(self, words, current_if, ips):
current_if['lladdr'] = words[1]
def parse_inet_line(self, words, current_if, ips):
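        # BSD ifconfig inet lines look like one of:
        #   inet 192.168.1.10 netmask 0xffffff00 broadcast 192.168.1.255
        #   inet 10.0.0.1 netmask 255.255.255.0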
address = {'address': words[1]}
# deal with hex netmask
if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
words[3] = '0x' + words[3]
if words[3].startswith('0x'):
address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
else:
# otherwise assume this is a dotted quad
address['netmask'] = words[3]
# calculate the network
address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
# broadcast may be given or we need to calculate
if len(words) > 5:
address['broadcast'] = words[5]
else:
address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
# add to our list of addresses
if not words[1].startswith('127.'):
ips['all_ipv4_addresses'].append(address['address'])
current_if['ipv4'].append(address)
def parse_inet6_line(self, words, current_if, ips):
address = {'address': words[1]}
if (len(words) >= 4) and (words[2] == 'prefixlen'):
address['prefix'] = words[3]
if (len(words) >= 6) and (words[4] == 'scopeid'):
address['scope'] = words[5]
localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
if address['address'] not in localhost6:
ips['all_ipv6_addresses'].append(address['address'])
current_if['ipv6'].append(address)
def parse_unknown_line(self, words, current_if, ips):
# we are going to ignore unknown lines here - this may be
# a bad idea - but you can override it in your subclass
pass
def get_options(self, option_string):
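        # e.g. 'flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST>'
        #   -> ['UP', 'BROADCAST', 'RUNNING', 'SIMPLEX', 'MULTICAST']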
start = option_string.find('<') + 1
end = option_string.rfind('>')
if (start > 0) and (end > 0) and (end > start + 1):
option_csv = option_string[start:end]
return option_csv.split(',')
else:
return []
def merge_default_interface(self, defaults, interfaces, ip_type):
        if 'interface' not in defaults:
            return
        if defaults['interface'] not in interfaces:
return
ifinfo = interfaces[defaults['interface']]
# copy all the interface values across except addresses
for item in ifinfo.keys():
if item != 'ipv4' and item != 'ipv6':
defaults[item] = ifinfo[item]
if len(ifinfo[ip_type]) > 0:
for item in ifinfo[ip_type][0].keys():
defaults[item] = ifinfo[ip_type][0][item]
class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the Mac OS X/Darwin Network Class.
It uses the GenericBsdIfconfigNetwork unchanged
"""
platform = 'Darwin'
# media line is different to the default FreeBSD one
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
current_if['media_type'] = words[2][1:]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the FreeBSD Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'FreeBSD'
class AIXNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the AIX Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'AIX'
# AIX 'ifconfig -a' does not have three words in the interface line
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
# only this condition differs from GenericBsdIfconfigNetwork
if re.match('^\w*\d*:', line):
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
    # AIX 'ifconfig -a' does not report the MTU, so don't set current_if['mtu'] here
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the OpenBSD Network Class.
It uses the GenericBsdIfconfigNetwork.
"""
platform = 'OpenBSD'
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
class SunOSNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the SunOS Network Class.
It uses the GenericBsdIfconfigNetwork.
Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
"""
platform = 'SunOS'
# Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
# MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
# 'parse_interface_line()' checks for previously seen interfaces before defining
# 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
if re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words, current_if, interfaces)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
# 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
# ipv4/ipv6 lists which is ugly and hard to read.
# This quick hack merges the dictionaries. Purely cosmetic.
for iface in interfaces:
for v in 'ipv4', 'ipv6':
combined_facts = {}
for facts in interfaces[iface][v]:
combined_facts.update(facts)
if len(combined_facts.keys()) > 0:
interfaces[iface][v] = [combined_facts]
return interfaces, ips
def parse_interface_line(self, words, current_if, interfaces):
device = words[0][0:-1]
        if device not in interfaces:
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
else:
current_if = interfaces[device]
flags = self.get_options(words[1])
v = 'ipv4'
if 'IPv6' in flags:
v = 'ipv6'
current_if[v].append({'flags': flags, 'mtu': words[3]})
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
# Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
# Add leading zero to each octet where needed.
def parse_ether_line(self, words, current_if, ips):
macaddress = ''
for octet in words[1].split(':'):
            octet = ('0' + octet)[-2:]
macaddress += (octet + ':')
current_if['macaddress'] = macaddress[0:-1]
class Virtual(Facts):
"""
This is a generic Virtual subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you should define:
- virtualization_type
- virtualization_role
- container (e.g. solaris zones, freebsd jails, linux containers)
All subclasses MUST define platform.
"""
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Virtual.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self):
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
if os.path.exists("/proc/xen"):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
try:
for line in open('/proc/xen/capabilities'):
if "control_d" in line:
self.facts['virtualization_role'] = 'host'
except IOError:
pass
return
if os.path.exists('/proc/vz'):
self.facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/1/cgroup'):
for line in open('/proc/1/cgroup').readlines():
if re.search('/lxc/', line):
self.facts['virtualization_type'] = 'lxc'
self.facts['virtualization_role'] = 'guest'
return
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
if product_name in ['KVM', 'Bochs']:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'RHEV Hypervisor':
self.facts['virtualization_type'] = 'RHEV'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'VMware Virtual Platform':
self.facts['virtualization_type'] = 'VMware'
self.facts['virtualization_role'] = 'guest'
return
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
return
if bios_vendor == 'innotek GmbH':
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
return
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
        # FIXME: this also matches Hyper-V
if sys_vendor == 'Microsoft Corporation':
self.facts['virtualization_type'] = 'VirtualPC'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'Parallels Software International Inc.':
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/self/status'):
for line in open('/proc/self/status').readlines():
if re.match('^VxID: \d+', line):
self.facts['virtualization_type'] = 'linux_vserver'
if re.match('^VxID: 0', line):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/cpuinfo'):
for line in open('/proc/cpuinfo').readlines():
if re.match('^model name.*QEMU Virtual CPU', line):
self.facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^vendor_id.*PowerVM Lx86', line):
self.facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
self.facts['virtualization_type'] = 'PR/SM'
lscpu = module.get_bin_path('lscpu')
if lscpu:
rc, out, err = module.run_command(["lscpu"])
if rc == 0:
for line in out.split("\n"):
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
self.facts['virtualization_type'] = data[1].strip()
else:
self.facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if self.facts['virtualization_type'] == 'PR/SM':
self.facts['virtualization_role'] = 'LPAR'
else:
self.facts['virtualization_role'] = 'guest'
return
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in open("/proc/modules").readlines():
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'host'
return
if 'vboxdrv' in modules:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'host'
return
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
self.facts['virtualization_type'] = 'NA'
self.facts['virtualization_role'] = 'NA'
return
class HPUXVirtual(Virtual):
"""
This is a HP-UX specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'HP-UX'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
if os.path.exists('/usr/sbin/vecheck'):
rc, out, err = module.run_command("/usr/sbin/vecheck")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP vPar'
if os.path.exists('/opt/hpvm/bin/hpvminfo'):
rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo")
if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM vPar'
elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM IVM'
elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
self.facts['virtualization_type'] = 'host'
self.facts['virtualization_role'] = 'HPVM'
if os.path.exists('/usr/sbin/parstatus'):
rc, out, err = module.run_command("/usr/sbin/parstatus")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP nPar'
class SunOSVirtual(Virtual):
"""
This is a SunOS-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
- container
"""
platform = 'SunOS'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
rc, out, err = module.run_command("/usr/sbin/prtdiag")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'Parallels' in line:
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
if 'HVM domU' in line:
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
# Check if it's a zone
if os.path.exists("/usr/bin/zonename"):
rc, out, err = module.run_command("/usr/bin/zonename")
if out.rstrip() != "global":
self.facts['container'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if os.path.isdir('/.SUNWnative'):
self.facts['container'] = 'zone'
        # If it's a zone, check whether we can detect that the global zone itself is virtualized.
        # Relies on the "guest tools" (e.g. vmware tools) being installed
if 'container' in self.facts and self.facts['container'] == 'zone':
rc, out, err = module.run_command("/usr/sbin/modinfo")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
def get_file_content(path, default=None):
data = default
if os.path.exists(path) and os.access(path, os.R_OK):
data = open(path).read().strip()
if len(data) == 0:
data = default
return data
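# Hedged usage sketch: get_file_content returns the stripped text of a
# readable, non-empty file and otherwise falls back to `default`. The path
# below is illustrative only:
#
#     product_uuid = get_file_content('/sys/class/dmi/id/product_uuid',
#                                     default='NA')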
def ansible_facts(module):
facts = {}
facts.update(Facts().populate())
facts.update(Hardware().populate())
facts.update(Network(module).populate())
facts.update(Virtual().populate())
return facts
# ===========================================
def get_all_facts(module):
setup_options = dict(module_setup=True)
facts = ansible_facts(module)
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
# Look for the paths to the facter and ohai binaries and set
# the variables to those paths.
facter_path = module.get_bin_path('facter')
ohai_path = module.get_bin_path('ohai')
# If facter is installed, and we can use --json because
# ruby-json is also installed, include the facter data in the output
if facter_path is not None:
rc, out, err = module.run_command(facter_path + " --json")
facter = True
try:
facter_ds = json.loads(out)
except Exception:
facter = False
if facter:
for (k,v) in facter_ds.items():
setup_options["facter_%s" % k] = v
# ditto for ohai
if ohai_path is not None:
rc, out, err = module.run_command(ohai_path)
ohai = True
try:
ohai_ds = json.loads(out)
except Exception:
ohai = False
if ohai:
for (k,v) in ohai_ds.items():
k2 = "ohai_%s" % k.replace('-', '_')
setup_options[k2] = v
setup_result = { 'ansible_facts': {} }
for (k,v) in setup_options.items():
if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
setup_result['ansible_facts'][k] = v
# hack to keep --verbose from showing all the setup module results
setup_result['verbose_override'] = True
return setup_result
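# Hedged sketch of the filter step above (fnmatch is already imported by
# this module; the helper below is illustrative, not part of the module):
# module.params['filter'] is matched against each fact name with
# shell-style globbing.
def filter_facts(facts, pattern):
    """Keep only the facts whose keys match the glob `pattern`."""
    return dict((k, v) for (k, v) in facts.items()
                if pattern == '*' or fnmatch.fnmatch(k, pattern))
# filter_facts({'ansible_os_family': 'Debian', 'facter_kernel': 'Linux'},
#              'ansible_*')  ->  {'ansible_os_family': 'Debian'}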
|
davidraleigh/cxxtest
|
refs/heads/master
|
build_tools/SCons/test/multifile_tests/TestDef.py
|
32
|
links = {'cxxtest' : '../../../../'}
|
CYBAI/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/conformance-checkers/tools/dl.py
|
107
|
# -*- coding: utf-8 -*-
import os
ccdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
template = """<!DOCTYPE html>
<meta charset=utf-8>
"""
errors = {
"dl-in-p": "<p><dl><dt>text<dd>text</dl></p>",
"header-in-dt": "<dl><dt><header>text</header><dd>text</dl>",
"footer-in-dt": "<dl><dt><footer>text</footer><dd>text</dl>",
"article-in-dt": "<dl><dt><article><h2>text</h2></article><dd>text</dl>",
"aside-in-dt": "<dl><dt><aside><h2>text</h2></aside><dd>text</dl>",
"nav-in-dt": "<dl><dt><nav><h2>text</h2></nav><dd>text</dl>",
"section-in-dt": "<dl><dt><section><h2>text</h2></section><dd>text</dl>",
"h1-in-dt": "<dl><dt><h1>text</h1><dd>text</dl>",
"h2-in-dt": "<dl><dt><h2>text</h2><dd>text</dl>",
"h3-in-dt": "<dl><dt><h3>text</h3><dd>text</dl>",
"h4-in-dt": "<dl><dt><h4>text</h4><dd>text</dl>",
"h5-in-dt": "<dl><dt><h5>text</h5><dd>text</dl>",
"h6-in-dt": "<dl><dt><h6>text</h6><dd>text</dl>",
"hgroup-in-dt": "<dl><dt><hgroup><h1>text</h1></hgroup><dd>text</dl>",
"only-dt": "<dl><dt>1</dl>",
"only-dd": "<dl><dd>a</dl>",
"first-dd": "<dl><dd>a<dt>2<dd>b</dl>",
"last-dt": "<dl><dt>1<dd>a<dt>2</dl>",
"dd-in-template": "<dl><dt>1</dt><template><dd>a</dd></template></dl>",
"dt-in-template": "<dl><template><dt>1</dt></template><dd>a</dl>",
"dl-contains-text": "<dl><dt>1</dt>x</dl>",
"dl-contains-text-2": "<dl><dt>1<dd>a</dd>x</dl>",
"dl-contains-dl": "<dl><dt>1<dd>a</dd><dl></dl></dl>",
# div
"empty-div": "<dl><div></div></dl>",
"empty-div-2": "<dl><div></div><div><dt>2<dd>b</div></dl>",
"mixed-dt-dd-div": "<dl><dt>1<dd>a</dd><div><dt>2<dd>b</div></dl>",
"mixed-div-dt-dd": "<dl><div><dt>1<dd>a</div><dt>2<dd>b</dd></dl>",
"nested-divs": "<dl><div><div><dt>1<dd>a</div></div></dl>",
"div-splitting-groups": "<dl><div><dt>1</div><div><dd>a</div></dl>",
"div-splitting-groups-2": "<dl><div><dt>1<dd>a</div><div><dd>b</div></dl>",
"div-splitting-groups-3": "<dl><div><dt>1</div><div><dt>2<dd>b</div></dl>",
"div-contains-text": "<dl><div>x</div><dt>2<dd>b</div></dl>",
"div-contains-dl": "<dl><div><dl></dl></div><dt>2<dd>b</div></dl>",
"div-multiple-groups": "<dl><div><dt>1<dd>a<dt>2<dd>a<dd>b<dt>3<dt>4<dt>5<dd>a</div></dl>",
}
non_errors_in_head = {
"parent-template-in-head": "<template><dl><dt>text<dd>text</dl></template>",
}
non_errors = {
"basic": "<dl><dt>text<dd>text</dl>",
"empty": "<dl></dl>",
"empty-dt-dd": "<dl><dt><dd></dl>",
"multiple-groups": "<dl><dt>1<dd>a<dt>2<dd>a<dd>b<dt>3<dt>4<dt>5<dd>a</dl>",
"header-in-dd": "<dl><dt>text<dd><header>text</header></dl>",
"footer-in-dd": "<dl><dt>text<dd><footer>text</footer></dl>",
"article-in-dd": "<dl><dt>text<dd><article><h2>text</h2></article></dl>",
"aside-in-dd": "<dl><dt>text<dd><aside><h2>text</h2></aside></dl>",
"nav-in-dd": "<dl><dt>text<dd><nav><h2>text</h2></nav></dl>",
"section-in-dd": "<dl><dt>text<dd><section><h2>text</h2></section></dl>",
"h1-in-dd": "<dl><dt>text<dd><h1>text</h1></dl>",
"h2-in-dd": "<dl><dt>text<dd><h2>text</h2></dl>",
"h3-in-dd": "<dl><dt>text<dd><h3>text</h3></dl>",
"h4-in-dd": "<dl><dt>text<dd><h4>text</h4></dl>",
"h5-in-dd": "<dl><dt>text<dd><h5>text</h5></dl>",
"h6-in-dd": "<dl><dt>text<dd><h6>text</h6></dl>",
"p-in-dt": "<dl><dt><p>1<p>1<dd>a</dl>",
"dl-in-dt": "<dl><dt><dl><dt>1<dd>a</dl><dd>b</dl>",
"dl-in-dd": "<dl><dt>1<dd><dl><dt>2<dd>a</dl></dl>",
"interactive": "<dl><dt><a href='#'>1</a><dd><a href='#'>a</a></dl>",
"script": "<dl><script></script></dl>",
"dt-script-dd": "<dl><dt>1</dt><script></script><dd>a</dl>",
"dt-template-dd": "<dl><dt>1</dt><template></template><dd>a</dl>",
# div
"div-basic": "<dl><div><dt>1<dd>a</div></dl>",
"div-script": "<dl><div><dt>1<dd>a</div><script></script></dl>",
"div-script-2": "<dl><div><dt>1</dt><script></script><dd>a</div></dl>",
"div-template": "<dl><div><dt>1<dd>a</div><template></template></dl>",
"div-template-2": "<dl><div><dt>1</dt><template></template><dd>a</div></dl>",
"div-multiple-groups": "<dl><div><dt>1<dd>a</div><div><dt>2<dd>a<dd>b</div><div><dt>3<dt>4<dt>5<dd>a</div></dl>",
}
for key in errors.keys():
template_error = template
template_error += '<title>invalid %s</title>\n' % key
template_error += errors[key]
file = open(os.path.join(ccdir, "html/elements/dl/%s-novalid.html" % key), 'wb')
file.write(template_error)
file.close()
file = open(os.path.join(ccdir, "html/elements/dl/dl-isvalid.html"), 'wb')
file.write(template + '<title>valid dl</title>\n')
for key in non_errors_in_head.keys():
file.write('%s <!-- %s -->\n' % (non_errors_in_head[key], key))
file.write('<body>\n')
for key in non_errors.keys():
file.write('%s <!-- %s -->\n' % (non_errors[key], key))
file.close()
# vim: ts=4:sw=4
|
rie-command/skistream-electron
|
refs/heads/master
|
node_modules/serialport-electron/node_modules/node-gyp/gyp/buildbot/buildbot_run.py
|
270
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import filecmp
import os
import shutil
import subprocess
import sys
if sys.platform in ['win32', 'cygwin']:
EXE_SUFFIX = '.exe'
else:
EXE_SUFFIX = ''
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
ANDROID_DIR = os.path.join(ROOT_DIR, 'android')
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
"""Wrapper around subprocess.call which treats errors as build exceptions."""
with open(os.devnull) as devnull_fd:
retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
if retcode != 0:
print '@@@STEP_EXCEPTION@@@'
sys.exit(1)
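# Hedged usage sketch: CallSubProcess forwards its arguments straight to
# subprocess.call with stdin redirected from /dev/null, and turns any
# non-zero exit status into a build-step exception for the annotator:
#
#     CallSubProcess(['make', 'cmake'], cwd=CMAKE_DIR)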
def PrepareCmake():
"""Build CMake 2.8.8 since the version in Precise is 2.8.7."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber CMake checkout@@@'
shutil.rmtree(CMAKE_DIR)
# We always build CMake 2.8.8, so no need to do anything
# if the directory already exists.
if os.path.isdir(CMAKE_DIR):
return
print '@@@BUILD_STEP Initialize CMake checkout@@@'
os.mkdir(CMAKE_DIR)
print '@@@BUILD_STEP Sync CMake@@@'
CallSubProcess(
['git', 'clone',
'--depth', '1',
'--single-branch',
'--branch', 'v2.8.8',
'--',
'git://cmake.org/cmake.git',
CMAKE_DIR],
cwd=CMAKE_DIR)
print '@@@BUILD_STEP Build CMake@@@'
CallSubProcess(
['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
cwd=CMAKE_DIR)
CallSubProcess( ['make', 'cmake'], cwd=CMAKE_DIR)
_ANDROID_SETUP = 'source build/envsetup.sh && lunch full-eng'
def PrepareAndroidTree():
"""Prepare an Android tree to run 'android' format tests."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber Android checkout@@@'
shutil.rmtree(ANDROID_DIR)
# (Re)create the directory so that the following steps will succeed.
if not os.path.isdir(ANDROID_DIR):
os.mkdir(ANDROID_DIR)
# We use a manifest from the gyp project listing pinned revisions of AOSP to
# use, to ensure that we test against a stable target. This needs to be
# updated to pick up new build system changes sometimes, so we must test if
# it has changed.
manifest_filename = 'aosp_manifest.xml'
gyp_manifest = os.path.join(BUILDBOT_DIR, manifest_filename)
android_manifest = os.path.join(ANDROID_DIR, '.repo', 'manifests',
manifest_filename)
manifest_is_current = (os.path.isfile(android_manifest) and
filecmp.cmp(gyp_manifest, android_manifest))
if not manifest_is_current:
# It's safe to repeat these steps, so just do them again to make sure we are
# in a good state.
print '@@@BUILD_STEP Initialize Android checkout@@@'
CallSubProcess(
['repo', 'init',
'-u', 'https://android.googlesource.com/platform/manifest',
'-b', 'master',
'-g', 'all,-notdefault,-device,-darwin,-mips,-x86'],
cwd=ANDROID_DIR)
shutil.copy(gyp_manifest, android_manifest)
print '@@@BUILD_STEP Sync Android@@@'
CallSubProcess(['repo', 'sync', '-j4', '-m', manifest_filename],
cwd=ANDROID_DIR)
# If we already built the system image successfully and didn't sync to a new
# version of the source, skip running the build again as it's expensive even
# when there's nothing to do.
system_img = os.path.join(ANDROID_DIR, 'out', 'target', 'product', 'generic',
'system.img')
if manifest_is_current and os.path.isfile(system_img):
return
print '@@@BUILD_STEP Build Android@@@'
CallSubProcess(
['/bin/bash',
'-c', '%s && make -j4' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StartAndroidEmulator():
"""Start an android emulator from the built android tree."""
print '@@@BUILD_STEP Start Android emulator@@@'
CallSubProcess(['/bin/bash', '-c',
'%s && adb kill-server ' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
# If taskset is available, use it to force adbd to run only on one core, as,
# sadly, it improves its reliability (see crbug.com/268450).
adbd_wrapper = ''
with open(os.devnull, 'w') as devnull_fd:
if subprocess.call(['which', 'taskset'], stdout=devnull_fd) == 0:
adbd_wrapper = 'taskset -c 0'
CallSubProcess(['/bin/bash', '-c',
'%s && %s adb start-server ' % (_ANDROID_SETUP, adbd_wrapper)],
cwd=ANDROID_DIR)
subprocess.Popen(
['/bin/bash', '-c',
'%s && emulator -no-window' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
CallSubProcess(
['/bin/bash', '-c',
'%s && adb wait-for-device' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StopAndroidEmulator():
"""Stop all android emulators."""
print '@@@BUILD_STEP Stop Android emulator@@@'
# If this fails, it's because there is no emulator running.
subprocess.call(['pkill', 'emulator.*'])
def GypTestFormat(title, format=None, msvs_version=None, tests=None):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
0 for success, 1 for failure.
"""
tests = tests or []
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'gyp/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'gyp'] + tests)
if format == 'android':
# gyptest needs the environment setup from envsetup/lunch in order to build
# using the 'android' backend, so this is done in a single shell.
retcode = subprocess.call(
['/bin/bash',
'-c', '%s && cd %s && %s' % (_ANDROID_SETUP, ROOT_DIR, command)],
cwd=ANDROID_DIR, env=env)
else:
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
# The Android gyp bot runs on linux so this must be tested first.
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-android':
PrepareAndroidTree()
StartAndroidEmulator()
try:
retcode += GypTestFormat('android')
finally:
StopAndroidEmulator()
elif sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('make')
PrepareCmake()
retcode += GypTestFormat('cmake')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('ninja')
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
msvs_version='2013',
tests=[
r'test\generator-output\gyptest-actions.py',
r'test\generator-output\gyptest-relocate.py',
r'test\generator-output\gyptest-rules.py'])
retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
|
aselle/tensorflow
|
refs/heads/master
|
tensorflow/examples/adding_an_op/zero_out_op_2.py
|
190
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ZeroOut ops Python library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
_zero_out_module = tf.load_op_library(
os.path.join(tf.resource_loader.get_data_files_path(),
'zero_out_op_kernel_2.so'))
zero_out = _zero_out_module.zero_out
zero_out2 = _zero_out_module.zero_out2
zero_out3 = _zero_out_module.zero_out3
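# Hedged usage sketch (assumes the TF 1.x Session API that matches the
# tf.load_op_library call above; input values are illustrative). The
# ZeroOut op keeps the first element of its input and zeroes the rest:
#
#     with tf.Session():
#         print(zero_out([[1, 2], [3, 4]]).eval())
#     # -> [[1 0]
#     #     [0 0]]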
|
astrofrog/glue-3d-viewer
|
refs/heads/master
|
glue_vispy_viewers/extern/vispy/ext/cocoapy.py
|
21
|
# -*- coding: utf-8 -*-
from ctypes import (cdll, util, Structure, cast, byref, POINTER, CFUNCTYPE,
c_int, c_long, c_ulong, c_ushort, c_wchar, c_uint32,
c_double, c_uint, c_float, c_void_p, c_char_p, c_bool,
c_buffer, c_ubyte, c_byte, c_int8, c_int16, c_int32,
c_int64, c_short, c_longlong, c_size_t, sizeof,
c_uint8, c_longdouble, c_char, c_ulonglong, py_object,
alignment, ArgumentError)
import platform
import struct
import sys
if sys.version_info[0] >= 3:
string_types = str,
else:
string_types = basestring, # noqa
# Based on Pyglet code
##############################################################################
# cocoatypes.py
__LP64__ = (8 * struct.calcsize("P") == 64)
__i386__ = (platform.machine() == 'i386')
PyObjectEncoding = b'{PyObject=@}'
def encoding_for_ctype(vartype):
typecodes = {c_char: b'c', c_int: b'i', c_short: b's', c_long: b'l',
c_longlong: b'q', c_ubyte: b'C', c_uint: b'I', c_ushort: b'S',
c_ulong: b'L', c_ulonglong: b'Q', c_float: b'f',
c_double: b'd', c_bool: b'B', c_char_p: b'*', c_void_p: b'@',
py_object: PyObjectEncoding}
return typecodes.get(vartype, b'?')
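# Hedged examples of the mapping above; unknown ctypes fall back to b'?':
#
#     encoding_for_ctype(c_int)     # -> b'i'
#     encoding_for_ctype(c_double)  # -> b'd'
#     encoding_for_ctype(py_object) # -> b'{PyObject=@}'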
if __LP64__:
NSInteger = c_long
NSUInteger = c_ulong
CGFloat = c_double
NSPointEncoding = b'{CGPoint=dd}'
NSSizeEncoding = b'{CGSize=dd}'
NSRectEncoding = b'{CGRect={CGPoint=dd}{CGSize=dd}}'
NSRangeEncoding = b'{_NSRange=QQ}'
else:
NSInteger = c_int
NSUInteger = c_uint
CGFloat = c_float
NSPointEncoding = b'{_NSPoint=ff}'
NSSizeEncoding = b'{_NSSize=ff}'
NSRectEncoding = b'{_NSRect={_NSPoint=ff}{_NSSize=ff}}'
NSRangeEncoding = b'{_NSRange=II}'
NSIntegerEncoding = encoding_for_ctype(NSInteger)
NSUIntegerEncoding = encoding_for_ctype(NSUInteger)
CGFloatEncoding = encoding_for_ctype(CGFloat)
CGImageEncoding = b'{CGImage=}'
NSZoneEncoding = b'{_NSZone=}'
class NSPoint(Structure):
_fields_ = [("x", CGFloat), ("y", CGFloat)]
CGPoint = NSPoint
class NSSize(Structure):
_fields_ = [("width", CGFloat), ("height", CGFloat)]
CGSize = NSSize
class NSRect(Structure):
_fields_ = [("origin", NSPoint), ("size", NSSize)]
CGRect = NSRect
NSTimeInterval = c_double
CFIndex = c_long
UniChar = c_ushort
unichar = c_wchar
CGGlyph = c_ushort
class CFRange(Structure):
_fields_ = [("location", CFIndex), ("length", CFIndex)]
class NSRange(Structure):
_fields_ = [("location", NSUInteger), ("length", NSUInteger)]
CFTypeID = c_ulong
CFNumberType = c_uint32
##############################################################################
# runtime.py
__LP64__ = (8*struct.calcsize("P") == 64)
__i386__ = (platform.machine() == 'i386')
if sizeof(c_void_p) == 4:
c_ptrdiff_t = c_int32
elif sizeof(c_void_p) == 8:
c_ptrdiff_t = c_int64
objc = cdll.LoadLibrary(util.find_library('objc'))
objc.class_addIvar.restype = c_bool
objc.class_addIvar.argtypes = [c_void_p, c_char_p, c_size_t, c_uint8, c_char_p]
objc.class_addMethod.restype = c_bool
objc.class_addProtocol.restype = c_bool
objc.class_addProtocol.argtypes = [c_void_p, c_void_p]
objc.class_conformsToProtocol.restype = c_bool
objc.class_conformsToProtocol.argtypes = [c_void_p, c_void_p]
objc.class_copyIvarList.restype = POINTER(c_void_p)
objc.class_copyIvarList.argtypes = [c_void_p, POINTER(c_uint)]
objc.class_copyMethodList.restype = POINTER(c_void_p)
objc.class_copyMethodList.argtypes = [c_void_p, POINTER(c_uint)]
objc.class_copyPropertyList.restype = POINTER(c_void_p)
objc.class_copyPropertyList.argtypes = [c_void_p, POINTER(c_uint)]
objc.class_copyProtocolList.restype = POINTER(c_void_p)
objc.class_copyProtocolList.argtypes = [c_void_p, POINTER(c_uint)]
objc.class_createInstance.restype = c_void_p
objc.class_createInstance.argtypes = [c_void_p, c_size_t]
objc.class_getClassMethod.restype = c_void_p
objc.class_getClassMethod.argtypes = [c_void_p, c_void_p]
objc.class_getClassVariable.restype = c_void_p
objc.class_getClassVariable.argtypes = [c_void_p, c_char_p]
objc.class_getInstanceMethod.restype = c_void_p
objc.class_getInstanceMethod.argtypes = [c_void_p, c_void_p]
objc.class_getInstanceSize.restype = c_size_t
objc.class_getInstanceSize.argtypes = [c_void_p]
objc.class_getInstanceVariable.restype = c_void_p
objc.class_getInstanceVariable.argtypes = [c_void_p, c_char_p]
objc.class_getIvarLayout.restype = c_char_p
objc.class_getIvarLayout.argtypes = [c_void_p]
objc.class_getMethodImplementation.restype = c_void_p
objc.class_getMethodImplementation.argtypes = [c_void_p, c_void_p]
objc.class_getMethodImplementation_stret.restype = c_void_p
objc.class_getMethodImplementation_stret.argtypes = [c_void_p, c_void_p]
objc.class_getName.restype = c_char_p
objc.class_getName.argtypes = [c_void_p]
objc.class_getProperty.restype = c_void_p
objc.class_getProperty.argtypes = [c_void_p, c_char_p]
objc.class_getSuperclass.restype = c_void_p
objc.class_getSuperclass.argtypes = [c_void_p]
objc.class_getVersion.restype = c_int
objc.class_getVersion.argtypes = [c_void_p]
objc.class_getWeakIvarLayout.restype = c_char_p
objc.class_getWeakIvarLayout.argtypes = [c_void_p]
objc.class_isMetaClass.restype = c_bool
objc.class_isMetaClass.argtypes = [c_void_p]
objc.class_replaceMethod.restype = c_void_p
objc.class_replaceMethod.argtypes = [c_void_p, c_void_p, c_void_p, c_char_p]
objc.class_respondsToSelector.restype = c_bool
objc.class_respondsToSelector.argtypes = [c_void_p, c_void_p]
objc.class_setIvarLayout.restype = None
objc.class_setIvarLayout.argtypes = [c_void_p, c_char_p]
objc.class_setSuperclass.restype = c_void_p
objc.class_setSuperclass.argtypes = [c_void_p, c_void_p]
objc.class_setVersion.restype = None
objc.class_setVersion.argtypes = [c_void_p, c_int]
objc.class_setWeakIvarLayout.restype = None
objc.class_setWeakIvarLayout.argtypes = [c_void_p, c_char_p]
objc.ivar_getName.restype = c_char_p
objc.ivar_getName.argtypes = [c_void_p]
objc.ivar_getOffset.restype = c_ptrdiff_t
objc.ivar_getOffset.argtypes = [c_void_p]
objc.ivar_getTypeEncoding.restype = c_char_p
objc.ivar_getTypeEncoding.argtypes = [c_void_p]
objc.method_copyArgumentType.restype = c_char_p
objc.method_copyArgumentType.argtypes = [c_void_p, c_uint]
objc.method_copyReturnType.restype = c_char_p
objc.method_copyReturnType.argtypes = [c_void_p]
objc.method_exchangeImplementations.restype = None
objc.method_exchangeImplementations.argtypes = [c_void_p, c_void_p]
objc.method_getArgumentType.restype = None
objc.method_getArgumentType.argtypes = [c_void_p, c_uint, c_char_p, c_size_t]
objc.method_getImplementation.restype = c_void_p
objc.method_getImplementation.argtypes = [c_void_p]
objc.method_getName.restype = c_void_p
objc.method_getName.argtypes = [c_void_p]
objc.method_getNumberOfArguments.restype = c_uint
objc.method_getNumberOfArguments.argtypes = [c_void_p]
objc.method_getReturnType.restype = None
objc.method_getReturnType.argtypes = [c_void_p, c_char_p, c_size_t]
objc.method_getTypeEncoding.restype = c_char_p
objc.method_getTypeEncoding.argtypes = [c_void_p]
objc.method_setImplementation.restype = c_void_p
objc.method_setImplementation.argtypes = [c_void_p, c_void_p]
objc.objc_allocateClassPair.restype = c_void_p
objc.objc_allocateClassPair.argtypes = [c_void_p, c_char_p, c_size_t]
objc.objc_copyProtocolList.restype = POINTER(c_void_p)
objc.objc_copyProtocolList.argtypes = [POINTER(c_int)]
objc.objc_getAssociatedObject.restype = c_void_p
objc.objc_getAssociatedObject.argtypes = [c_void_p, c_void_p]
objc.objc_getClass.restype = c_void_p
objc.objc_getClass.argtypes = [c_char_p]
objc.objc_getClassList.restype = c_int
objc.objc_getClassList.argtypes = [c_void_p, c_int]
objc.objc_getMetaClass.restype = c_void_p
objc.objc_getMetaClass.argtypes = [c_char_p]
objc.objc_getProtocol.restype = c_void_p
objc.objc_getProtocol.argtypes = [c_char_p]
objc.objc_msgSendSuper_stret.restype = None
objc.objc_msgSend_stret.restype = None
objc.objc_registerClassPair.restype = None
objc.objc_registerClassPair.argtypes = [c_void_p]
objc.objc_removeAssociatedObjects.restype = None
objc.objc_removeAssociatedObjects.argtypes = [c_void_p]
objc.objc_setAssociatedObject.restype = None
objc.objc_setAssociatedObject.argtypes = [c_void_p, c_void_p, c_void_p, c_int]
objc.object_copy.restype = c_void_p
objc.object_copy.argtypes = [c_void_p, c_size_t]
objc.object_dispose.restype = c_void_p
objc.object_dispose.argtypes = [c_void_p]
objc.object_getClass.restype = c_void_p
objc.object_getClass.argtypes = [c_void_p]
objc.object_getClassName.restype = c_char_p
objc.object_getClassName.argtypes = [c_void_p]
objc.object_getInstanceVariable.restype = c_void_p
objc.object_getInstanceVariable.argtypes = [c_void_p, c_char_p, c_void_p]
objc.object_getIvar.restype = c_void_p
objc.object_getIvar.argtypes = [c_void_p, c_void_p]
objc.object_setClass.restype = c_void_p
objc.object_setClass.argtypes = [c_void_p, c_void_p]
objc.object_setInstanceVariable.restype = c_void_p
objc.object_setIvar.restype = None
objc.object_setIvar.argtypes = [c_void_p, c_void_p, c_void_p]
objc.property_getAttributes.restype = c_char_p
objc.property_getAttributes.argtypes = [c_void_p]
objc.property_getName.restype = c_char_p
objc.property_getName.argtypes = [c_void_p]
objc.protocol_conformsToProtocol.restype = c_bool
objc.protocol_conformsToProtocol.argtypes = [c_void_p, c_void_p]
class OBJC_METHOD_DESCRIPTION(Structure):
_fields_ = [("name", c_void_p), ("types", c_char_p)]
objc.protocol_copyMethodDescriptionList.restype = \
POINTER(OBJC_METHOD_DESCRIPTION)
objc.protocol_copyMethodDescriptionList.argtypes = [c_void_p, c_bool,
c_bool, POINTER(c_uint)]
objc.protocol_copyPropertyList.restype = c_void_p
objc.protocol_copyPropertyList.argtypes = [c_void_p, POINTER(c_uint)]
objc.protocol_copyProtocolList.restype = POINTER(c_void_p)
objc.protocol_copyProtocolList.argtypes = [c_void_p, POINTER(c_uint)]
objc.protocol_getMethodDescription.restype = OBJC_METHOD_DESCRIPTION
objc.protocol_getMethodDescription.argtypes = [c_void_p, c_void_p,
c_bool, c_bool]
objc.protocol_getName.restype = c_char_p
objc.protocol_getName.argtypes = [c_void_p]
objc.sel_getName.restype = c_char_p
objc.sel_getName.argtypes = [c_void_p]
objc.sel_isEqual.restype = c_bool
objc.sel_isEqual.argtypes = [c_void_p, c_void_p]
objc.sel_registerName.restype = c_void_p
objc.sel_registerName.argtypes = [c_char_p]
def ensure_bytes(x):
if isinstance(x, bytes):
return x
return x.encode('ascii')
def get_selector(name):
return c_void_p(objc.sel_registerName(ensure_bytes(name)))
def get_class(name):
return c_void_p(objc.objc_getClass(ensure_bytes(name)))
def get_object_class(obj):
return c_void_p(objc.object_getClass(obj))
def get_metaclass(name):
return c_void_p(objc.objc_getMetaClass(ensure_bytes(name)))
def get_superclass_of_object(obj):
cls = c_void_p(objc.object_getClass(obj))
return c_void_p(objc.class_getSuperclass(cls))
def x86_should_use_stret(restype):
if type(restype) != type(Structure):
return False
if not __LP64__ and sizeof(restype) <= 8:
return False
if __LP64__ and sizeof(restype) <= 16: # maybe? I don't know?
return False
return True
def should_use_fpret(restype):
if not __i386__:
return False
if __LP64__ and restype == c_longdouble:
return True
if not __LP64__ and restype in (c_float, c_double, c_longdouble):
return True
return False
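# Hedged illustration of the dispatch rules above: struct returns wider
# than the register-return limit go through objc_msgSend_stret, and
# floating-point returns on i386 go through objc_msgSend_fpret; everything
# else uses plain objc_msgSend. For example:
#
#     x86_should_use_stret(NSRect)    # True (struct, larger than 8/16 bytes)
#     x86_should_use_stret(c_double)  # False (not a Structure subclass)
#     should_use_fpret(c_double)      # False unless __i386__ is set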
def send_message(receiver, selName, *args, **kwargs):
if isinstance(receiver, string_types):
receiver = get_class(receiver)
selector = get_selector(selName)
restype = kwargs.get('restype', c_void_p)
argtypes = kwargs.get('argtypes', [])
if should_use_fpret(restype):
objc.objc_msgSend_fpret.restype = restype
objc.objc_msgSend_fpret.argtypes = [c_void_p, c_void_p] + argtypes
result = objc.objc_msgSend_fpret(receiver, selector, *args)
elif x86_should_use_stret(restype):
objc.objc_msgSend_stret.argtypes = [POINTER(restype), c_void_p,
c_void_p] + argtypes
result = restype()
objc.objc_msgSend_stret(byref(result), receiver, selector, *args)
else:
objc.objc_msgSend.restype = restype
objc.objc_msgSend.argtypes = [c_void_p, c_void_p] + argtypes
result = objc.objc_msgSend(receiver, selector, *args)
if restype == c_void_p:
result = c_void_p(result)
return result
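# Hedged usage sketch (macOS only, so shown commented out; it talks to the
# live Objective-C runtime):
#
#     pool = send_message('NSAutoreleasePool', 'alloc')
#     pool = send_message(pool, 'init')
#     send_message(pool, 'drain', restype=None)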
class OBJC_SUPER(Structure):
_fields_ = [('receiver', c_void_p), ('class', c_void_p)]
OBJC_SUPER_PTR = POINTER(OBJC_SUPER)
def send_super(receiver, selName, *args, **kwargs):
if hasattr(receiver, '_as_parameter_'):
receiver = receiver._as_parameter_
superclass = get_superclass_of_object(receiver)
super_struct = OBJC_SUPER(receiver, superclass)
selector = get_selector(selName)
restype = kwargs.get('restype', c_void_p)
argtypes = kwargs.get('argtypes', None)
objc.objc_msgSendSuper.restype = restype
if argtypes:
objc.objc_msgSendSuper.argtypes = [OBJC_SUPER_PTR, c_void_p] + argtypes
else:
objc.objc_msgSendSuper.argtypes = None
result = objc.objc_msgSendSuper(byref(super_struct), selector, *args)
if restype == c_void_p:
result = c_void_p(result)
return result
cfunctype_table = {}
def parse_type_encoding(encoding):
type_encodings = []
brace_count = 0 # number of unclosed curly braces
bracket_count = 0 # number of unclosed square brackets
typecode = b''
for c in encoding:
if isinstance(c, int):
c = bytes([c])
if c == b'{':
if typecode and typecode[-1:] != b'^' and brace_count == 0 and \
bracket_count == 0:
type_encodings.append(typecode)
typecode = b''
typecode += c
brace_count += 1
elif c == b'}':
typecode += c
brace_count -= 1
assert(brace_count >= 0)
elif c == b'[':
if typecode and typecode[-1:] != b'^' and brace_count == 0 and \
bracket_count == 0:
type_encodings.append(typecode)
typecode = b''
typecode += c
bracket_count += 1
elif c == b']':
typecode += c
bracket_count -= 1
assert(bracket_count >= 0)
elif brace_count or bracket_count:
typecode += c
elif c in b'0123456789':
pass
elif c in b'rnNoORV':
pass
elif c in b'^cislqCISLQfdBv*@#:b?':
if typecode and typecode[-1:] == b'^':
typecode += c
else:
if typecode:
type_encodings.append(typecode)
typecode = c
if typecode:
type_encodings.append(typecode)
return type_encodings
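# Hedged examples of the parser above (byte strings as produced by the
# Objective-C runtime; embedded digits are stack offsets and are skipped):
#
#     parse_type_encoding(b'v@:')        # -> [b'v', b'@', b':']
#     parse_type_encoding(b'@24@0:8@16') # -> [b'@', b'@', b':', b'@']
#     parse_type_encoding(b'{CGSize=dd}16@0:8')
#     # -> [b'{CGSize=dd}', b'@', b':']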
def cfunctype_for_encoding(encoding):
if encoding in cfunctype_table:
return cfunctype_table[encoding]
typecodes = {b'c': c_char, b'i': c_int, b's': c_short, b'l': c_long,
b'q': c_longlong, b'C': c_ubyte, b'I': c_uint, b'S': c_ushort,
b'L': c_ulong, b'Q': c_ulonglong, b'f': c_float,
b'd': c_double, b'B': c_bool, b'v': None, b'*': c_char_p,
b'@': c_void_p, b'#': c_void_p, b':': c_void_p,
NSPointEncoding: NSPoint, NSSizeEncoding: NSSize,
NSRectEncoding: NSRect, NSRangeEncoding: NSRange,
PyObjectEncoding: py_object}
argtypes = []
for code in parse_type_encoding(encoding):
if code in typecodes:
argtypes.append(typecodes[code])
elif code[0:1] == b'^' and code[1:] in typecodes:
argtypes.append(POINTER(typecodes[code[1:]]))
else:
raise Exception('unknown type encoding: %r' % code)
cfunctype = CFUNCTYPE(*argtypes)
cfunctype_table[encoding] = cfunctype
return cfunctype
def create_subclass(superclass, name):
if isinstance(superclass, string_types):
superclass = get_class(superclass)
return c_void_p(objc.objc_allocateClassPair(superclass,
ensure_bytes(name), 0))
def register_subclass(subclass):
objc.objc_registerClassPair(subclass)
def add_method(cls, selName, method, types):
type_encodings = parse_type_encoding(types)
assert(type_encodings[1] == b'@') # ensure id self typecode
assert(type_encodings[2] == b':') # ensure SEL cmd typecode
selector = get_selector(selName)
cfunctype = cfunctype_for_encoding(types)
imp = cfunctype(method)
objc.class_addMethod.argtypes = [c_void_p, c_void_p, cfunctype, c_char_p]
objc.class_addMethod(cls, selector, imp, types)
return imp
def add_ivar(cls, name, vartype):
return objc.class_addIvar(cls, ensure_bytes(name), sizeof(vartype),
alignment(vartype), encoding_for_ctype(vartype))
def set_instance_variable(obj, varname, value, vartype):
objc.object_setInstanceVariable.argtypes = [c_void_p, c_char_p, vartype]
objc.object_setInstanceVariable(obj, ensure_bytes(varname), value)
def get_instance_variable(obj, varname, vartype):
variable = vartype()
objc.object_getInstanceVariable(obj, ensure_bytes(varname),
byref(variable))
return variable.value
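# Hedged sketch tying the helpers above together (macOS only, so shown
# commented out; the class and ivar names are illustrative). Ivars must be
# added before the class pair is registered:
#
#     MyObject = create_subclass('NSObject', 'MyObject')
#     add_ivar(MyObject, 'counter', NSInteger)
#     register_subclass(MyObject)
#     obj = send_message(send_message('MyObject', 'alloc'), 'init')
#     set_instance_variable(obj, 'counter', 3, NSInteger)
#     get_instance_variable(obj, 'counter', NSInteger)  # -> 3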
class ObjCMethod(object):
"""This represents an unbound Objective-C method (really an IMP)."""
typecodes = {b'c': c_byte, b'i': c_int, b's': c_short, b'l': c_long,
b'q': c_longlong, b'C': c_ubyte, b'I': c_uint, b'S': c_ushort,
b'L': c_ulong, b'Q': c_ulonglong, b'f': c_float,
b'd': c_double, b'B': c_bool, b'v': None, b'Vv': None,
b'*': c_char_p, b'@': c_void_p, b'#': c_void_p,
b':': c_void_p, b'^v': c_void_p, b'?': c_void_p,
NSPointEncoding: NSPoint, NSSizeEncoding: NSSize,
NSRectEncoding: NSRect, NSRangeEncoding: NSRange,
PyObjectEncoding: py_object}
cfunctype_table = {}
def __init__(self, method):
self.selector = c_void_p(objc.method_getName(method))
self.name = objc.sel_getName(self.selector)
self.pyname = self.name.replace(b':', b'_')
self.encoding = objc.method_getTypeEncoding(method)
self.return_type = objc.method_copyReturnType(method)
self.nargs = objc.method_getNumberOfArguments(method)
self.imp = c_void_p(objc.method_getImplementation(method))
self.argument_types = []
for i in range(self.nargs):
buffer = c_buffer(512)
objc.method_getArgumentType(method, i, buffer, len(buffer))
self.argument_types.append(buffer.value)
try:
self.argtypes = [self.ctype_for_encoding(t)
for t in self.argument_types]
except Exception:
self.argtypes = None
try:
if self.return_type == b'@':
self.restype = ObjCInstance
elif self.return_type == b'#':
self.restype = ObjCClass
else:
self.restype = self.ctype_for_encoding(self.return_type)
except Exception:
self.restype = None
self.func = None
def ctype_for_encoding(self, encoding):
"""Return ctypes type for an encoded Objective-C type."""
if encoding in self.typecodes:
return self.typecodes[encoding]
elif encoding[0:1] == b'^' and encoding[1:] in self.typecodes:
return POINTER(self.typecodes[encoding[1:]])
elif encoding[0:1] == b'^' and encoding[1:] in [CGImageEncoding,
NSZoneEncoding]:
return c_void_p
elif encoding[0:1] == b'r' and encoding[1:] in self.typecodes:
return self.typecodes[encoding[1:]]
elif encoding[0:2] == b'r^' and encoding[2:] in self.typecodes:
return POINTER(self.typecodes[encoding[2:]])
else:
raise Exception('unknown encoding for %s: %s'
% (self.name, encoding))
def get_prototype(self):
if self.restype == ObjCInstance or self.restype == ObjCClass:
self.prototype = CFUNCTYPE(c_void_p, *self.argtypes)
else:
self.prototype = CFUNCTYPE(self.restype, *self.argtypes)
return self.prototype
def __repr__(self):
return "<ObjCMethod: %s %s>" % (self.name, self.encoding)
def get_callable(self):
if not self.func:
prototype = self.get_prototype()
self.func = cast(self.imp, prototype)
if self.restype == ObjCInstance or self.restype == ObjCClass:
self.func.restype = c_void_p
else:
self.func.restype = self.restype
self.func.argtypes = self.argtypes
return self.func
def __call__(self, objc_id, *args):
f = self.get_callable()
try:
result = f(objc_id, self.selector, *args)
if self.restype == ObjCInstance:
result = ObjCInstance(result)
elif self.restype == ObjCClass:
result = ObjCClass(result)
return result
except ArgumentError as error:
error.args += ('selector = %s' % self.name,
'argtypes = %s' % str(self.argtypes),
'encoding = %s' % self.encoding)
raise
class ObjCBoundMethod(object):
def __init__(self, method, objc_id):
self.method = method
self.objc_id = objc_id
def __repr__(self):
return '<ObjCBoundMethod %s (%s)>' % (self.method.name, self.objc_id)
def __call__(self, *args):
return self.method(self.objc_id, *args)
class ObjCClass(object):
_registered_classes = {}
def __new__(cls, class_name_or_ptr):
if isinstance(class_name_or_ptr, string_types):
name = class_name_or_ptr
ptr = get_class(name)
else:
ptr = class_name_or_ptr
if not isinstance(ptr, c_void_p):
ptr = c_void_p(ptr)
name = objc.class_getName(ptr)
if name in cls._registered_classes:
return cls._registered_classes[name]
objc_class = super(ObjCClass, cls).__new__(cls)
objc_class.ptr = ptr
objc_class.name = name
objc_class.instance_methods = {} # mapping of name -> instance method
objc_class.class_methods = {} # mapping of name -> class method
objc_class._as_parameter_ = ptr # for ctypes argument passing
cls._registered_classes[name] = objc_class
objc_class.cache_instance_methods()
objc_class.cache_class_methods()
return objc_class
def __repr__(self):
return "<ObjCClass: %s at %s>" % (self.name, str(self.ptr.value))
def cache_instance_methods(self):
count = c_uint()
method_array = objc.class_copyMethodList(self.ptr, byref(count))
for i in range(count.value):
method = c_void_p(method_array[i])
objc_method = ObjCMethod(method)
self.instance_methods[objc_method.pyname] = objc_method
def cache_class_methods(self):
count = c_uint()
args = [objc.object_getClass(self.ptr), byref(count)]
method_array = objc.class_copyMethodList(*args)
for i in range(count.value):
method = c_void_p(method_array[i])
objc_method = ObjCMethod(method)
self.class_methods[objc_method.pyname] = objc_method
def get_instance_method(self, name):
if name in self.instance_methods:
return self.instance_methods[name]
else:
selector = get_selector(name.replace(b'_', b':'))
method = c_void_p(objc.class_getInstanceMethod(self.ptr, selector))
if method.value:
objc_method = ObjCMethod(method)
self.instance_methods[name] = objc_method
return objc_method
return None
def get_class_method(self, name):
if name in self.class_methods:
return self.class_methods[name]
else:
selector = get_selector(name.replace(b'_', b':'))
method = c_void_p(objc.class_getClassMethod(self.ptr, selector))
if method.value:
objc_method = ObjCMethod(method)
self.class_methods[name] = objc_method
return objc_method
return None
def __getattr__(self, name):
name = ensure_bytes(name)
method = self.get_class_method(name)
if method:
return ObjCBoundMethod(method, self.ptr)
method = self.get_instance_method(name)
if method:
return method
raise AttributeError('ObjCClass %s has no attribute %s'
% (self.name, name))
class ObjCInstance(object):
_cached_objects = {}
def __new__(cls, object_ptr):
if not isinstance(object_ptr, c_void_p):
object_ptr = c_void_p(object_ptr)
if not object_ptr.value:
return None
if object_ptr.value in cls._cached_objects:
return cls._cached_objects[object_ptr.value]
objc_instance = super(ObjCInstance, cls).__new__(cls)
objc_instance.ptr = object_ptr
objc_instance._as_parameter_ = object_ptr
class_ptr = c_void_p(objc.object_getClass(object_ptr))
objc_instance.objc_class = ObjCClass(class_ptr)
cls._cached_objects[object_ptr.value] = objc_instance
observer = send_message(send_message('DeallocationObserver', 'alloc'),
'initWithObject:', objc_instance)
objc.objc_setAssociatedObject(objc_instance, observer, observer, 0x301)
send_message(observer, 'release')
return objc_instance
def __repr__(self):
if self.objc_class.name == b'NSCFString':
from .cocoalibs import cfstring_to_string
string = cfstring_to_string(self)
return ("<ObjCInstance %#x: %s (%s) at %s>"
% (id(self), self.objc_class.name, string,
str(self.ptr.value)))
return ("<ObjCInstance %#x: %s at %s>"
% (id(self), self.objc_class.name, str(self.ptr.value)))
def __getattr__(self, name):
name = ensure_bytes(name)
method = self.objc_class.get_instance_method(name)
if method:
return ObjCBoundMethod(method, self)
method = self.objc_class.get_class_method(name)
if method:
return ObjCBoundMethod(method, self.objc_class.ptr)
keys = list(self.objc_class.instance_methods.keys())
raise AttributeError('ObjCInstance %s has no attribute %s, only:\n%s'
% (self.objc_class.name, name, keys))
def convert_method_arguments(encoding, args):
new_args = []
arg_encodings = parse_type_encoding(encoding)[3:]
for e, a in zip(arg_encodings, args):
if e == b'@':
new_args.append(ObjCInstance(a))
elif e == b'#':
new_args.append(ObjCClass(a))
else:
new_args.append(a)
return new_args
class ObjCSubclass(object):
def __init__(self, superclass, name, register=True):
self._imp_table = {}
self.name = name
self.objc_cls = create_subclass(superclass, name)
self._as_parameter_ = self.objc_cls
if register:
self.register()
def register(self):
objc.objc_registerClassPair(self.objc_cls)
self.objc_metaclass = get_metaclass(self.name)
def add_ivar(self, varname, vartype):
return add_ivar(self.objc_cls, varname, vartype)
def add_method(self, method, name, encoding):
imp = add_method(self.objc_cls, name, method, encoding)
self._imp_table[name] = imp
def add_class_method(self, method, name, encoding):
imp = add_method(self.objc_metaclass, name, method, encoding)
self._imp_table[name] = imp
def rawmethod(self, encoding):
encoding = ensure_bytes(encoding)
typecodes = parse_type_encoding(encoding)
typecodes.insert(1, b'@:')
encoding = b''.join(typecodes)
def decorator(f):
name = f.__name__.replace('_', ':')
self.add_method(f, name, encoding)
return f
return decorator
def method(self, encoding):
encoding = ensure_bytes(encoding)
typecodes = parse_type_encoding(encoding)
typecodes.insert(1, b'@:')
encoding = b''.join(typecodes)
def decorator(f):
def objc_method(objc_self, objc_cmd, *args):
py_self = ObjCInstance(objc_self)
py_self.objc_cmd = objc_cmd
args = convert_method_arguments(encoding, args)
result = f(py_self, *args)
if isinstance(result, ObjCClass):
result = result.ptr.value
elif isinstance(result, ObjCInstance):
result = result.ptr.value
return result
name = f.__name__.replace('_', ':')
self.add_method(objc_method, name, encoding)
return objc_method
return decorator
def classmethod(self, encoding):
"""Function decorator for class methods."""
# Add encodings for hidden self and cmd arguments.
encoding = ensure_bytes(encoding)
typecodes = parse_type_encoding(encoding)
typecodes.insert(1, b'@:')
encoding = b''.join(typecodes)
def decorator(f):
def objc_class_method(objc_cls, objc_cmd, *args):
py_cls = ObjCClass(objc_cls)
py_cls.objc_cmd = objc_cmd
args = convert_method_arguments(encoding, args)
result = f(py_cls, *args)
if isinstance(result, ObjCClass):
result = result.ptr.value
elif isinstance(result, ObjCInstance):
result = result.ptr.value
return result
name = f.__name__.replace('_', ':')
self.add_class_method(objc_class_method, name, encoding)
return objc_class_method
return decorator
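# Hedged usage sketch for ObjCSubclass (macOS only, so shown commented out;
# the class name and selector are illustrative). rawmethod('v') expands to
# the full b'v@:' encoding, i.e. a void method taking only the implicit
# self and _cmd arguments:
#
#     MyDelegate = ObjCSubclass('NSObject', 'MyDelegate')
#
#     @MyDelegate.rawmethod('v')
#     def doSomething(self, cmd):
#         print('doSomething called')
#
#     obj = ObjCClass('MyDelegate').alloc().init()
#     obj.doSomething()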
# XXX This causes segfaults in all backends (yikes!), and makes it so that
# pyglet can't even be loaded. We'll just have to live with leaks for now,
# which is probably alright since we only use the
# NSFontManager.sharedFontManager class currently.
# class DeallocationObserver_Implementation(object):
# DeallocationObserver = ObjCSubclass('NSObject', 'DeallocationObserver',
# register=False)
# DeallocationObserver.add_ivar('observed_object', c_void_p)
# DeallocationObserver.register()
#
# @DeallocationObserver.rawmethod('@@')
# def initWithObject_(self, cmd, anObject):
# self = send_super(self, 'init')
# self = self.value
# set_instance_variable(self, 'observed_object', anObject, c_void_p)
# return self
#
# @DeallocationObserver.rawmethod('v')
# def dealloc(self, cmd):
# anObject = get_instance_variable(self, 'observed_object', c_void_p)
# ObjCInstance._cached_objects.pop(anObject, None)
# send_super(self, 'dealloc')
#
# @DeallocationObserver.rawmethod('v')
# def finalize(self, cmd):
# anObject = get_instance_variable(self, 'observed_object', c_void_p)
# ObjCInstance._cached_objects.pop(anObject, None)
# send_super(self, 'finalize')
##############################################################################
# cocoalibs.py
cf = cdll.LoadLibrary(util.find_library('CoreFoundation'))
kCFStringEncodingUTF8 = 0x08000100
CFAllocatorRef = c_void_p
CFStringEncoding = c_uint32
cf.CFStringCreateWithCString.restype = c_void_p
cf.CFStringCreateWithCString.argtypes = [CFAllocatorRef, c_void_p,
CFStringEncoding]
cf.CFRelease.restype = c_void_p
cf.CFRelease.argtypes = [c_void_p]
cf.CFStringGetLength.restype = CFIndex
cf.CFStringGetLength.argtypes = [c_void_p]
cf.CFStringGetMaximumSizeForEncoding.restype = CFIndex
cf.CFStringGetMaximumSizeForEncoding.argtypes = [CFIndex, CFStringEncoding]
cf.CFStringGetCString.restype = c_bool
cf.CFStringGetCString.argtypes = [c_void_p, c_char_p, CFIndex,
CFStringEncoding]
cf.CFStringGetTypeID.restype = CFTypeID
cf.CFStringGetTypeID.argtypes = []
cf.CFAttributedStringCreate.restype = c_void_p
cf.CFAttributedStringCreate.argtypes = [CFAllocatorRef, c_void_p, c_void_p]
cf.CFURLCreateWithFileSystemPath.restype = c_void_p
cf.CFURLCreateWithFileSystemPath.argtypes = [CFAllocatorRef, c_void_p,
CFIndex, c_bool]
def CFSTR(string):
args = [None, string.encode('utf8'), kCFStringEncodingUTF8]
return ObjCInstance(c_void_p(cf.CFStringCreateWithCString(*args)))
def get_NSString(string):
"""Autoreleased version of CFSTR"""
return CFSTR(string).autorelease()
def cfstring_to_string(cfstring):
length = cf.CFStringGetLength(cfstring)
size = cf.CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8)
buffer = c_buffer(size + 1)
result = cf.CFStringGetCString(cfstring, buffer, len(buffer),
kCFStringEncodingUTF8)
if result:
return buffer.value.decode('utf8')
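# Hedged round-trip sketch (macOS only, so shown commented out): CFSTR
# wraps a Python string in a CFString held as an ObjCInstance, and
# cfstring_to_string decodes it back to a Python unicode string:
#
#     s = CFSTR('hello')
#     cfstring_to_string(s)  # -> 'hello'
#     s.release()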
cf.CFDataCreate.restype = c_void_p
cf.CFDataCreate.argtypes = [c_void_p, c_void_p, CFIndex]
cf.CFDataGetBytes.restype = None
cf.CFDataGetBytes.argtypes = [c_void_p, CFRange, c_void_p]
cf.CFDataGetLength.restype = CFIndex
cf.CFDataGetLength.argtypes = [c_void_p]
cf.CFDictionaryGetValue.restype = c_void_p
cf.CFDictionaryGetValue.argtypes = [c_void_p, c_void_p]
cf.CFDictionaryAddValue.restype = None
cf.CFDictionaryAddValue.argtypes = [c_void_p, c_void_p, c_void_p]
cf.CFDictionaryCreateMutable.restype = c_void_p
cf.CFDictionaryCreateMutable.argtypes = [CFAllocatorRef, CFIndex,
c_void_p, c_void_p]
cf.CFNumberCreate.restype = c_void_p
cf.CFNumberCreate.argtypes = [CFAllocatorRef, CFNumberType, c_void_p]
cf.CFNumberGetType.restype = CFNumberType
cf.CFNumberGetType.argtypes = [c_void_p]
cf.CFNumberGetValue.restype = c_ubyte
cf.CFNumberGetValue.argtypes = [c_void_p, CFNumberType, c_void_p]
cf.CFNumberGetTypeID.restype = CFTypeID
cf.CFNumberGetTypeID.argtypes = []
cf.CFGetTypeID.restype = CFTypeID
cf.CFGetTypeID.argtypes = [c_void_p]
# CFNumber.h
kCFNumberSInt8Type = 1
kCFNumberSInt16Type = 2
kCFNumberSInt32Type = 3
kCFNumberSInt64Type = 4
kCFNumberFloat32Type = 5
kCFNumberFloat64Type = 6
kCFNumberCharType = 7
kCFNumberShortType = 8
kCFNumberIntType = 9
kCFNumberLongType = 10
kCFNumberLongLongType = 11
kCFNumberFloatType = 12
kCFNumberDoubleType = 13
kCFNumberCFIndexType = 14
kCFNumberNSIntegerType = 15
kCFNumberCGFloatType = 16
kCFNumberMaxType = 16
def cfnumber_to_number(cfnumber):
"""Convert CFNumber to python int or float."""
numeric_type = cf.CFNumberGetType(cfnumber)
cfnum_to_ctype = {kCFNumberSInt8Type: c_int8, kCFNumberSInt16Type: c_int16,
kCFNumberSInt32Type: c_int32,
kCFNumberSInt64Type: c_int64,
kCFNumberFloat32Type: c_float,
kCFNumberFloat64Type: c_double,
kCFNumberCharType: c_byte, kCFNumberShortType: c_short,
kCFNumberIntType: c_int, kCFNumberLongType: c_long,
kCFNumberLongLongType: c_longlong,
kCFNumberFloatType: c_float,
kCFNumberDoubleType: c_double,
kCFNumberCFIndexType: CFIndex,
kCFNumberCGFloatType: CGFloat}
if numeric_type in cfnum_to_ctype:
t = cfnum_to_ctype[numeric_type]
result = t()
if cf.CFNumberGetValue(cfnumber, numeric_type, byref(result)):
return result.value
else:
raise Exception(
'cfnumber_to_number: unhandled CFNumber type %d' % numeric_type)
# Dictionary of cftypes matched to the method converting them to python values.
known_cftypes = {cf.CFStringGetTypeID(): cfstring_to_string,
cf.CFNumberGetTypeID(): cfnumber_to_number}
def cftype_to_value(cftype):
"""Convert a CFType into an equivalent python type.
The convertible CFTypes are taken from the known_cftypes
dictionary, which may be added to if another library implements
its own conversion methods."""
if not cftype:
return None
typeID = cf.CFGetTypeID(cftype)
if typeID in known_cftypes:
convert_function = known_cftypes[typeID]
return convert_function(cftype)
else:
return cftype
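# Hedged extension sketch, as the docstring above suggests: another library
# can register its own CFTypeID -> converter pair. The CFBoolean handling
# below is illustrative and would run on macOS only:
#
#     cf.CFBooleanGetTypeID.restype = CFTypeID
#     cf.CFBooleanGetTypeID.argtypes = []
#     cf.CFBooleanGetValue.restype = c_bool
#     cf.CFBooleanGetValue.argtypes = [c_void_p]
#     known_cftypes[cf.CFBooleanGetTypeID()] = cf.CFBooleanGetValue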
cf.CFSetGetCount.restype = CFIndex
cf.CFSetGetCount.argtypes = [c_void_p]
cf.CFSetGetValues.restype = None
# PyPy 1.7 is fine with 2nd arg as POINTER(c_void_p),
# but CPython ctypes 1.1.0 complains, so just use c_void_p.
cf.CFSetGetValues.argtypes = [c_void_p, c_void_p]
def cfset_to_set(cfset):
"""Convert CFSet to python set."""
count = cf.CFSetGetCount(cfset)
buffer = (c_void_p * count)()
cf.CFSetGetValues(cfset, byref(buffer))
return set([cftype_to_value(c_void_p(buffer[i])) for i in range(count)])
cf.CFArrayGetCount.restype = CFIndex
cf.CFArrayGetCount.argtypes = [c_void_p]
cf.CFArrayGetValueAtIndex.restype = c_void_p
cf.CFArrayGetValueAtIndex.argtypes = [c_void_p, CFIndex]
def cfarray_to_list(cfarray):
"""Convert CFArray to python list."""
count = cf.CFArrayGetCount(cfarray)
return [cftype_to_value(c_void_p(cf.CFArrayGetValueAtIndex(cfarray, i)))
for i in range(count)]
kCFRunLoopDefaultMode = c_void_p.in_dll(cf, 'kCFRunLoopDefaultMode')
cf.CFRunLoopGetCurrent.restype = c_void_p
cf.CFRunLoopGetCurrent.argtypes = []
cf.CFRunLoopGetMain.restype = c_void_p
cf.CFRunLoopGetMain.argtypes = []
cf.CFShow.restype = None
cf.CFShow.argtypes = [c_void_p]
######################################################################
# APPLICATION KIT
# Even though we don't use this directly, it must be loaded so that
# we can find the NSApplication, NSWindow, and NSView classes.
appkit = cdll.LoadLibrary(util.find_library('AppKit'))
NSDefaultRunLoopMode = c_void_p.in_dll(appkit, 'NSDefaultRunLoopMode')
NSEventTrackingRunLoopMode = c_void_p.in_dll(
appkit, 'NSEventTrackingRunLoopMode')
NSApplicationDidHideNotification = c_void_p.in_dll(
appkit, 'NSApplicationDidHideNotification')
NSApplicationDidUnhideNotification = c_void_p.in_dll(
appkit, 'NSApplicationDidUnhideNotification')
# /System/Library/Frameworks/AppKit.framework/Headers/NSEvent.h
# NSAnyEventMask = 0xFFFFFFFFL # NSUIntegerMax
# Commented out b/c not Py3k compatible
NSKeyDown = 10
NSKeyUp = 11
NSFlagsChanged = 12
NSApplicationDefined = 15
NSAlphaShiftKeyMask = 1 << 16
NSShiftKeyMask = 1 << 17
NSControlKeyMask = 1 << 18
NSAlternateKeyMask = 1 << 19
NSCommandKeyMask = 1 << 20
NSNumericPadKeyMask = 1 << 21
NSHelpKeyMask = 1 << 22
NSFunctionKeyMask = 1 << 23
NSInsertFunctionKey = 0xF727
NSDeleteFunctionKey = 0xF728
NSHomeFunctionKey = 0xF729
NSBeginFunctionKey = 0xF72A
NSEndFunctionKey = 0xF72B
NSPageUpFunctionKey = 0xF72C
NSPageDownFunctionKey = 0xF72D
# /System/Library/Frameworks/AppKit.framework/Headers/NSWindow.h
NSBorderlessWindowMask = 0
NSTitledWindowMask = 1 << 0
NSClosableWindowMask = 1 << 1
NSMiniaturizableWindowMask = 1 << 2
NSResizableWindowMask = 1 << 3
# /System/Library/Frameworks/AppKit.framework/Headers/NSPanel.h
NSUtilityWindowMask = 1 << 4
# /System/Library/Frameworks/AppKit.framework/Headers/NSGraphics.h
NSBackingStoreRetained = 0
NSBackingStoreNonretained = 1
NSBackingStoreBuffered = 2
# /System/Library/Frameworks/AppKit.framework/Headers/NSTrackingArea.h
NSTrackingMouseEnteredAndExited = 0x01
NSTrackingMouseMoved = 0x02
NSTrackingCursorUpdate = 0x04
NSTrackingActiveInActiveApp = 0x40
# /System/Library/Frameworks/AppKit.framework/Headers/NSOpenGL.h
NSOpenGLPFAAllRenderers = 1 # choose from all available renderers
NSOpenGLPFADoubleBuffer = 5 # choose a double buffered pixel format
NSOpenGLPFAStereo = 6 # stereo buffering supported
NSOpenGLPFAAuxBuffers = 7 # number of aux buffers
NSOpenGLPFAColorSize = 8 # number of color buffer bits
NSOpenGLPFAAlphaSize = 11 # number of alpha component bits
NSOpenGLPFADepthSize = 12 # number of depth buffer bits
NSOpenGLPFAStencilSize = 13 # number of stencil buffer bits
NSOpenGLPFAAccumSize = 14 # number of accum buffer bits
NSOpenGLPFAMinimumPolicy = 51 # never choose smaller buffers than requested
NSOpenGLPFAMaximumPolicy = 52 # choose largest buffers of type requested
NSOpenGLPFAOffScreen = 53 # choose an off-screen capable renderer
NSOpenGLPFAFullScreen = 54 # choose a full-screen capable renderer
NSOpenGLPFASampleBuffers = 55 # number of multi sample buffers
NSOpenGLPFASamples = 56 # number of samples per multi sample buffer
NSOpenGLPFAAuxDepthStencil = 57 # each aux buffer has its own depth stencil
NSOpenGLPFAColorFloat = 58 # color buffers store floating point pixels
NSOpenGLPFAMultisample = 59 # choose multisampling
NSOpenGLPFASupersample = 60 # choose supersampling
NSOpenGLPFASampleAlpha = 61 # request alpha filtering
NSOpenGLPFARendererID = 70 # request renderer by ID
NSOpenGLPFASingleRenderer = 71 # choose a single renderer for all screens
NSOpenGLPFANoRecovery = 72 # disable all failure recovery systems
NSOpenGLPFAAccelerated = 73 # choose a hardware accelerated renderer
NSOpenGLPFAClosestPolicy = 74 # choose the closest color buffer to request
NSOpenGLPFARobust = 75 # renderer does not need failure recovery
NSOpenGLPFABackingStore = 76 # back buffer contents are valid after swap
NSOpenGLPFAMPSafe = 78 # renderer is multi-processor safe
NSOpenGLPFAWindow = 80 # can be used to render to an onscreen window
NSOpenGLPFAMultiScreen = 81 # single window can span multiple screens
NSOpenGLPFACompliant = 83 # renderer is opengl compliant
NSOpenGLPFAScreenMask = 84 # bit mask of supported physical screens
NSOpenGLPFAPixelBuffer = 90 # can be used to render to a pbuffer
# can be used to render offline to a pbuffer
NSOpenGLPFARemotePixelBuffer = 91
NSOpenGLPFAAllowOfflineRenderers = 96 # allow use of offline renderers
# choose a hardware accelerated compute device
NSOpenGLPFAAcceleratedCompute = 97
# number of virtual screens in this format
NSOpenGLPFAVirtualScreenCount = 128
NSOpenGLCPSwapInterval = 222
# /System/Library/Frameworks/ApplicationServices.framework/Frameworks/...
# CoreGraphics.framework/Headers/CGImage.h
kCGImageAlphaNone = 0
kCGImageAlphaPremultipliedLast = 1
kCGImageAlphaPremultipliedFirst = 2
kCGImageAlphaLast = 3
kCGImageAlphaFirst = 4
kCGImageAlphaNoneSkipLast = 5
kCGImageAlphaNoneSkipFirst = 6
kCGImageAlphaOnly = 7
kCGImageAlphaPremultipliedLast = 1
kCGBitmapAlphaInfoMask = 0x1F
kCGBitmapFloatComponents = 1 << 8
kCGBitmapByteOrderMask = 0x7000
kCGBitmapByteOrderDefault = 0 << 12
kCGBitmapByteOrder16Little = 1 << 12
kCGBitmapByteOrder32Little = 2 << 12
kCGBitmapByteOrder16Big = 3 << 12
kCGBitmapByteOrder32Big = 4 << 12
# NSApplication.h
NSApplicationPresentationDefault = 0
NSApplicationPresentationHideDock = 1 << 1
NSApplicationPresentationHideMenuBar = 1 << 3
NSApplicationPresentationDisableProcessSwitching = 1 << 5
NSApplicationPresentationDisableHideApplication = 1 << 8
# NSRunningApplication.h
NSApplicationActivationPolicyRegular = 0
NSApplicationActivationPolicyAccessory = 1
NSApplicationActivationPolicyProhibited = 2
######################################################################
# QUARTZ / COREGRAPHICS
quartz = cdll.LoadLibrary(util.find_library('quartz'))
CGDirectDisplayID = c_uint32 # CGDirectDisplay.h
CGError = c_int32 # CGError.h
CGBitmapInfo = c_uint32 # CGImage.h
# /System/Library/Frameworks/ApplicationServices.framework/Frameworks/...
# ImageIO.framework/Headers/CGImageProperties.h
kCGImagePropertyGIFDictionary = c_void_p.in_dll(
quartz, 'kCGImagePropertyGIFDictionary')
kCGImagePropertyGIFDelayTime = c_void_p.in_dll(
quartz, 'kCGImagePropertyGIFDelayTime')
# /System/Library/Frameworks/ApplicationServices.framework/Frameworks/...
# CoreGraphics.framework/Headers/CGColorSpace.h
kCGRenderingIntentDefault = 0
quartz.CGDisplayIDToOpenGLDisplayMask.restype = c_uint32
quartz.CGDisplayIDToOpenGLDisplayMask.argtypes = [c_uint32]
quartz.CGMainDisplayID.restype = CGDirectDisplayID
quartz.CGMainDisplayID.argtypes = []
quartz.CGShieldingWindowLevel.restype = c_int32
quartz.CGShieldingWindowLevel.argtypes = []
quartz.CGCursorIsVisible.restype = c_bool
quartz.CGDisplayCopyAllDisplayModes.restype = c_void_p
quartz.CGDisplayCopyAllDisplayModes.argtypes = [CGDirectDisplayID, c_void_p]
quartz.CGDisplaySetDisplayMode.restype = CGError
quartz.CGDisplaySetDisplayMode.argtypes = [
CGDirectDisplayID, c_void_p, c_void_p]
quartz.CGDisplayCapture.restype = CGError
quartz.CGDisplayCapture.argtypes = [CGDirectDisplayID]
quartz.CGDisplayRelease.restype = CGError
quartz.CGDisplayRelease.argtypes = [CGDirectDisplayID]
quartz.CGDisplayCopyDisplayMode.restype = c_void_p
quartz.CGDisplayCopyDisplayMode.argtypes = [CGDirectDisplayID]
quartz.CGDisplayModeGetRefreshRate.restype = c_double
quartz.CGDisplayModeGetRefreshRate.argtypes = [c_void_p]
quartz.CGDisplayModeRetain.restype = c_void_p
quartz.CGDisplayModeRetain.argtypes = [c_void_p]
quartz.CGDisplayModeRelease.restype = None
quartz.CGDisplayModeRelease.argtypes = [c_void_p]
quartz.CGDisplayModeGetWidth.restype = c_size_t
quartz.CGDisplayModeGetWidth.argtypes = [c_void_p]
quartz.CGDisplayModeGetHeight.restype = c_size_t
quartz.CGDisplayModeGetHeight.argtypes = [c_void_p]
quartz.CGDisplayModeCopyPixelEncoding.restype = c_void_p
quartz.CGDisplayModeCopyPixelEncoding.argtypes = [c_void_p]
quartz.CGGetActiveDisplayList.restype = CGError
quartz.CGGetActiveDisplayList.argtypes = [
c_uint32, POINTER(CGDirectDisplayID), POINTER(c_uint32)]
quartz.CGDisplayBounds.restype = CGRect
quartz.CGDisplayBounds.argtypes = [CGDirectDisplayID]
quartz.CGImageSourceCreateWithData.restype = c_void_p
quartz.CGImageSourceCreateWithData.argtypes = [c_void_p, c_void_p]
quartz.CGImageSourceCreateImageAtIndex.restype = c_void_p
quartz.CGImageSourceCreateImageAtIndex.argtypes = [
c_void_p, c_size_t, c_void_p]
quartz.CGImageSourceCopyPropertiesAtIndex.restype = c_void_p
quartz.CGImageSourceCopyPropertiesAtIndex.argtypes = [
c_void_p, c_size_t, c_void_p]
quartz.CGImageGetDataProvider.restype = c_void_p
quartz.CGImageGetDataProvider.argtypes = [c_void_p]
quartz.CGDataProviderCopyData.restype = c_void_p
quartz.CGDataProviderCopyData.argtypes = [c_void_p]
quartz.CGDataProviderCreateWithCFData.restype = c_void_p
quartz.CGDataProviderCreateWithCFData.argtypes = [c_void_p]
quartz.CGImageCreate.restype = c_void_p
quartz.CGImageCreate.argtypes = [c_size_t, c_size_t, c_size_t, c_size_t,
c_size_t, c_void_p, c_uint32, c_void_p,
c_void_p, c_bool, c_int]
quartz.CGImageRelease.restype = None
quartz.CGImageRelease.argtypes = [c_void_p]
quartz.CGImageGetBytesPerRow.restype = c_size_t
quartz.CGImageGetBytesPerRow.argtypes = [c_void_p]
quartz.CGImageGetWidth.restype = c_size_t
quartz.CGImageGetWidth.argtypes = [c_void_p]
quartz.CGImageGetHeight.restype = c_size_t
quartz.CGImageGetHeight.argtypes = [c_void_p]
quartz.CGImageGetBitsPerPixel.restype = c_size_t
quartz.CGImageGetBitsPerPixel.argtypes = [c_void_p]
quartz.CGImageGetBitmapInfo.restype = CGBitmapInfo
quartz.CGImageGetBitmapInfo.argtypes = [c_void_p]
quartz.CGColorSpaceCreateDeviceRGB.restype = c_void_p
quartz.CGColorSpaceCreateDeviceRGB.argtypes = []
quartz.CGDataProviderRelease.restype = None
quartz.CGDataProviderRelease.argtypes = [c_void_p]
quartz.CGColorSpaceRelease.restype = None
quartz.CGColorSpaceRelease.argtypes = [c_void_p]
quartz.CGWarpMouseCursorPosition.restype = CGError
quartz.CGWarpMouseCursorPosition.argtypes = [CGPoint]
quartz.CGDisplayMoveCursorToPoint.restype = CGError
quartz.CGDisplayMoveCursorToPoint.argtypes = [CGDirectDisplayID, CGPoint]
quartz.CGAssociateMouseAndMouseCursorPosition.restype = CGError
quartz.CGAssociateMouseAndMouseCursorPosition.argtypes = [c_bool]
quartz.CGBitmapContextCreate.restype = c_void_p
quartz.CGBitmapContextCreate.argtypes = [
c_void_p, c_size_t, c_size_t, c_size_t, c_size_t, c_void_p, CGBitmapInfo]
quartz.CGBitmapContextCreateImage.restype = c_void_p
quartz.CGBitmapContextCreateImage.argtypes = [c_void_p]
quartz.CGFontCreateWithDataProvider.restype = c_void_p
quartz.CGFontCreateWithDataProvider.argtypes = [c_void_p]
quartz.CGFontCreateWithFontName.restype = c_void_p
quartz.CGFontCreateWithFontName.argtypes = [c_void_p]
quartz.CGContextDrawImage.restype = None
quartz.CGContextDrawImage.argtypes = [c_void_p, CGRect, c_void_p]
quartz.CGContextRelease.restype = None
quartz.CGContextRelease.argtypes = [c_void_p]
quartz.CGContextSetTextPosition.restype = None
quartz.CGContextSetTextPosition.argtypes = [c_void_p, CGFloat, CGFloat]
quartz.CGContextSetShouldAntialias.restype = None
quartz.CGContextSetShouldAntialias.argtypes = [c_void_p, c_bool]
quartz.CGDataProviderCreateWithURL.restype = c_void_p
quartz.CGDataProviderCreateWithURL.argtypes = [c_void_p]
quartz.CGDisplayScreenSize.argtypes = [CGDirectDisplayID]
quartz.CGDisplayScreenSize.restype = CGSize
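# Usage sketch (illustrative, not part of the original bindings; assumes a
# macOS host where 'quartz' resolved above). With the prototypes declared
# here, querying the main display looks like:
#
#     display = quartz.CGMainDisplayID()
#     bounds = quartz.CGDisplayBounds(display)          # returns a CGRect
#     mode = quartz.CGDisplayCopyDisplayMode(display)
#     width = quartz.CGDisplayModeGetWidth(mode)
#     height = quartz.CGDisplayModeGetHeight(mode)
#     quartz.CGDisplayModeRelease(mode)                 # balance the copy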
######################################################################
# CORETEXT
ct = cdll.LoadLibrary(util.find_library('CoreText'))
# Types
CTFontOrientation = c_uint32 # CTFontDescriptor.h
CTFontSymbolicTraits = c_uint32 # CTFontTraits.h
# CoreText constants
kCTFontAttributeName = c_void_p.in_dll(ct, 'kCTFontAttributeName')
kCTFontFamilyNameAttribute = c_void_p.in_dll(ct, 'kCTFontFamilyNameAttribute')
kCTFontSymbolicTrait = c_void_p.in_dll(ct, 'kCTFontSymbolicTrait')
kCTFontWeightTrait = c_void_p.in_dll(ct, 'kCTFontWeightTrait')
kCTFontTraitsAttribute = c_void_p.in_dll(ct, 'kCTFontTraitsAttribute')
# constants from CTFontTraits.h
kCTFontItalicTrait = (1 << 0)
kCTFontBoldTrait = (1 << 1)
ct.CTLineCreateWithAttributedString.restype = c_void_p
ct.CTLineCreateWithAttributedString.argtypes = [c_void_p]
ct.CTLineDraw.restype = None
ct.CTLineDraw.argtypes = [c_void_p, c_void_p]
ct.CTFontGetBoundingRectsForGlyphs.restype = CGRect
ct.CTFontGetBoundingRectsForGlyphs.argtypes = [
c_void_p, CTFontOrientation, POINTER(CGGlyph), POINTER(CGRect), CFIndex]
ct.CTFontGetAdvancesForGlyphs.restype = c_double
ct.CTFontGetAdvancesForGlyphs.argtypes = [
c_void_p, CTFontOrientation, POINTER(CGGlyph), POINTER(CGSize), CFIndex]
ct.CTFontGetAscent.restype = CGFloat
ct.CTFontGetAscent.argtypes = [c_void_p]
ct.CTFontGetDescent.restype = CGFloat
ct.CTFontGetDescent.argtypes = [c_void_p]
ct.CTFontGetSymbolicTraits.restype = CTFontSymbolicTraits
ct.CTFontGetSymbolicTraits.argtypes = [c_void_p]
ct.CTFontGetGlyphsForCharacters.restype = c_bool
ct.CTFontGetGlyphsForCharacters.argtypes = [
c_void_p, POINTER(UniChar), POINTER(CGGlyph), CFIndex]
ct.CTFontCreateWithGraphicsFont.restype = c_void_p
ct.CTFontCreateWithGraphicsFont.argtypes = [c_void_p, CGFloat, c_void_p,
c_void_p]
ct.CTFontCopyFamilyName.restype = c_void_p
ct.CTFontCopyFamilyName.argtypes = [c_void_p]
ct.CTFontCopyFullName.restype = c_void_p
ct.CTFontCopyFullName.argtypes = [c_void_p]
ct.CTFontCreateWithFontDescriptor.restype = c_void_p
ct.CTFontCreateWithFontDescriptor.argtypes = [c_void_p, CGFloat, c_void_p]
ct.CTFontCreateCopyWithAttributes.restype = c_void_p
ct.CTFontCreateCopyWithAttributes.argtypes = [c_void_p, CGFloat, c_void_p,
c_void_p]
ct.CTFontDescriptorCreateWithAttributes.restype = c_void_p
ct.CTFontDescriptorCreateWithAttributes.argtypes = [c_void_p]
ct.CTTypesetterCreateWithAttributedString.restype = c_void_p
ct.CTTypesetterCreateWithAttributedString.argtypes = [c_void_p]
ct.CTTypesetterCreateLine.restype = c_void_p
ct.CTTypesetterCreateLine.argtypes = [c_void_p, CFRange]
ct.CTLineGetOffsetForStringIndex.restype = CGFloat
ct.CTLineGetOffsetForStringIndex.argtypes = [c_void_p, CFIndex,
POINTER(CGFloat)]
ct.CTFontManagerCreateFontDescriptorsFromURL.restype = c_void_p
ct.CTFontManagerCreateFontDescriptorsFromURL.argtypes = [c_void_p]
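# Usage sketch (illustrative only; 'font' is a hypothetical CTFont reference,
# e.g. obtained via CTFontCreateWithGraphicsFont above). Thanks to the CGFloat
# restypes declared here, font metrics come back as plain Python floats:
#
#     ascent = ct.CTFontGetAscent(font)
#     descent = ct.CTFontGetDescent(font)
#     traits = ct.CTFontGetSymbolicTraits(font)
#     is_bold = bool(traits & kCTFontBoldTrait)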
######################################################################
# FOUNDATION
# foundation = cdll.LoadLibrary(util.find_library('Foundation'))
# foundation.NSMouseInRect.restype = c_bool
# foundation.NSMouseInRect.argtypes = [NSPoint, NSRect, c_bool]
|
Slezhuk/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/dellos9.py
|
8
|
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# (c) 2017 Red Hat, Inc
#
# Copyright (c) 2016 Dell Inc.
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.network_common import to_list, ComplexList
from ansible.module_utils.connection import exec_command
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
_DEVICE_CONFIGS = {}
WARNING_PROMPTS_RE = [
r"[\r\n]?\[confirm yes/no\]:\s?$",
r"[\r\n]?\[y/n\]:\s?$",
r"[\r\n]?\[yes/no\]:\s?$"
]
dellos9_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
'timeout': dict(type='int'),
'provider': dict(type='dict'),
}
def check_args(module, warnings):
provider = module.params['provider'] or {}
for key in dellos9_argument_spec:
if key != 'provider' and module.params[key]:
warnings.append('argument %s has been deprecated and will be '
'removed in a future version' % key)
def get_config(module, flags=None):
    # avoid a shared mutable default argument
    flags = flags or []
    cmd = 'show running-config '
    cmd += ' '.join(flags)
    cmd = cmd.strip()
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
rc, out, err = exec_command(module, cmd)
if rc != 0:
module.fail_json(msg='unable to retrieve current config', stderr=err)
cfg = str(out).strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
module.fail_json(msg=err, rc=rc)
responses.append(out)
return responses
def load_config(module, commands):
rc, out, err = exec_command(module, 'configure terminal')
if rc != 0:
module.fail_json(msg='unable to enter configuration mode', err=err)
for command in to_list(commands):
if command == 'end':
continue
cmd = {'command': command, 'prompt': WARNING_PROMPTS_RE, 'answer': 'yes'}
rc, out, err = exec_command(module, module.jsonify(cmd))
if rc != 0:
module.fail_json(msg=err, command=command, rc=rc)
exec_command(module, 'end')
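# Usage sketch (illustrative, not part of the original module): inside a
# module's main() these helpers typically compose as below; the flag and
# command values are hypothetical.
#
#     warnings = []
#     check_args(module, warnings)
#     current = get_config(module, flags=['interface'])   # cached per command
#     responses = run_commands(module, ['show version'])
#     load_config(module, ['interface ethernet 1/1', 'no shutdown'])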
def get_sublevel_config(running_config, module):
contents = list()
current_config_contents = list()
running_config = NetworkConfig(contents=running_config, indent=1)
obj = running_config.get_object(module.params['parents'])
if obj:
contents = obj.children
contents[:0] = module.params['parents']
indent = 0
for c in contents:
        if isinstance(c, str):
            current_config_contents.append(c.rjust(len(c) + indent, ' '))
        elif isinstance(c, ConfigLine):
            current_config_contents.append(c.raw)
        indent = 1
sublevel_config = '\n'.join(current_config_contents)
return sublevel_config
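# Example (hedged sketch): with module.params['parents'] set to
# ['interface ethernet 1/1'], get_sublevel_config() returns that block of the
# running config, parent line left-aligned and children kept at their raw
# one-space indent, roughly:
#
#     interface ethernet 1/1
#      no shutdown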
|
mlassnig/pilot
|
refs/heads/master
|
__init__.py
|
12133432
| |
tedelhourani/ansible
|
refs/heads/devel
|
test/units/module_utils/facts/hardware/__init__.py
|
12133432
| |
indictranstech/erpnext
|
refs/heads/develop
|
erpnext/healthcare/doctype/normal_test_items/__init__.py
|
12133432
| |
sejust/pykit
|
refs/heads/master
|
etcd/client.py
|
2
|
#!/usr/bin/env python2
# coding: utf-8
import httplib
import logging
import socket
import time
import urllib
import urlparse
from pykit import http
from pykit import utfjson
logger = logging.getLogger(__name__)
class EtcdException(Exception):
pass
class EtcdInternalError(EtcdException):
pass
class NoMoreMachineError(EtcdException):
pass
class EtcdReadTimeoutError(EtcdException):
pass
class EtcdRequestError(EtcdException):
pass
class EtcdResponseError(EtcdException):
pass
class EtcdIncompleteRead(EtcdResponseError):
pass
class EtcdSSLError(EtcdException):
pass
class EtcdWatchError(EtcdException):
pass
class EtcdKeyError(EtcdException, KeyError):
pass
class EtcdValueError(EtcdException, ValueError):
pass
class EcodeKeyNotFound(EtcdKeyError):
pass
class EcodeTestFailed(EtcdValueError):
pass
class EcodeNotFile(EtcdKeyError):
pass
class EcodeNotDir(EtcdKeyError):
pass
class EcodeNodeExist(EtcdKeyError):
pass
class EcodeRootROnly(EtcdValueError):
pass
class EcodeDirNotEmpty(EtcdValueError):
pass
class EcodePrevValueRequired(EtcdValueError):
pass
class EcodeTTLNaN(EtcdValueError):
pass
class EcodeIndexNaN(EtcdValueError):
pass
class EcodeInvalidField(EtcdValueError):
pass
class EcodeInvalidForm(EtcdValueError):
pass
class EcodeInscientPermissions(EtcdException):
pass
def list_type(x):
if isinstance(x, (list, tuple)):
return True
return False
class EtcdError(object):
error_exceptions = {
100: EcodeKeyNotFound,
101: EcodeTestFailed,
102: EcodeNotFile,
103: EtcdException,
104: EcodeNotDir,
105: EcodeNodeExist,
106: EtcdKeyError,
107: EcodeRootROnly,
108: EcodeDirNotEmpty,
110: EcodeInscientPermissions,
200: EtcdValueError,
201: EcodePrevValueRequired,
202: EcodeTTLNaN,
203: EcodeIndexNaN,
209: EcodeInvalidField,
210: EcodeInvalidForm,
300: EtcdInternalError,
301: EtcdInternalError,
400: EtcdWatchError,
401: EtcdWatchError,
500: EtcdInternalError,
}
@classmethod
def handle(cls, response):
body = response.data
e = {}
e['status'] = response.status
e['headers'] = response.headers
e['response'] = body
try:
r = utfjson.load(body)
except ValueError:
r = {"message": "response body is not json", "cause": str(body)}
ecode = r.get('errorCode')
default_exc = EtcdException
if response.status == 404:
ecode = 100
elif response.status == 401:
ecode = 110
elif response.status >= 500:
default_exc = EtcdResponseError
exc = cls.error_exceptions.get(ecode, default_exc)
if ecode in cls.error_exceptions:
msg = "{msg} : {cause}".format(msg=r.get('message'),
cause=r.get('cause'))
else:
msg = "Unable to decode server response"
e['message'] = msg
raise exc(e)
class EtcdKeysResult(object):
_node_props = {
'key': None,
'value': None,
'expiration': None,
'ttl': None,
'modifiedIndex': None,
'createdIndex': None,
'newKey': False,
'dir': False,
}
def __init__(self, action=None, node=None, prevNode=None, **argkv):
self.action = action
for key, default in self._node_props.items():
if node is not None and key in node:
setattr(self, key, node[key])
else:
setattr(self, key, default)
self._children = []
if self.dir and 'nodes' in node:
self._children = node['nodes']
if prevNode:
self._prev_node = EtcdKeysResult(None, node=prevNode)
"""
#fix this bug
r = c.write('/foo', None, dir=True, ttl=50)
print(r.dir) #True
r2 = c.write('/foo', None, dir=True, ttl=120, prevExist=True)
print(r2.dir) #False
"""
if self._prev_node.dir and not self.dir:
self.dir = True
def parse_response(self, response):
if response.status == httplib.CREATED:
self.newKey = True
headers = response.headers
self.etcd_index = int(headers.get('x-etcd-index', 1))
self.raft_index = int(headers.get('x-raft-index', 1))
self.raft_term = int(headers.get('x-raft-term', 0))
def get_subtree(self, leaves_only=False):
if not self._children:
yield self
return
if not leaves_only:
yield self
for n in self._children:
node = EtcdKeysResult(None, n)
for child in node.get_subtree(leaves_only=leaves_only):
yield child
return
@property
def leaves(self):
return self.get_subtree(leaves_only=True)
def __eq__(self, other):
if not isinstance(other, EtcdKeysResult):
return False
for k in self._node_props.keys():
try:
a = getattr(self, k)
b = getattr(other, k)
if a != b:
return False
            except Exception:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r)" % (self.__class__, self.__dict__)
class Response(object):
REDIRECT_STATUSES = (301, 302, 303, 307, 308)
def __init__(self, conn=None, status=0, version=0,
reason=None, headers=None, body=''):
self._conn = conn
self.status = status
self.version = version
self.reason = reason
self.headers = headers
self._body = body
@property
def data(self):
if self._body:
return self._body
if hasattr(self._conn, 'read'):
self._body = self._conn.read()
return self._body
return ''
def get_redirect_location(self):
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return None
@classmethod
def from_http(Cls, h, **argkv):
return Cls(h,
status=h.status,
headers=h.headers,
body=h.read_body(None),
**argkv)
class Client(object):
_MGET = 'GET'
_MPUT = 'PUT'
_MPOST = 'POST'
_MDELETE = 'DELETE'
_write_conditions = set(('prevValue', 'prevIndex', 'prevExist'))
_read_options = set(('recursive', 'wait', 'waitIndex', 'sorted', 'quorum'))
_del_conditions = set(('prevValue', 'prevIndex'))
def __init__(self,
host='127.0.0.1',
port=2379,
version_prefix='/v2',
read_timeout=10,
allow_redirect=True,
protocol='http',
allow_reconnect=True,
basic_auth_account=None,
):
self._protocol = protocol
if protocol == 'https':
raise EtcdSSLError('not supported https right now')
self._machines_cache = []
if not list_type(host):
self._host = host
self._port = int(port)
self._base_uri = '%s://%s:%d' % (self._protocol,
self._host, self._port)
else:
for h in host:
if list_type(h):
_h, _p = (list(h) + [int(port)])[:2]
else:
_h, _p = h, int(port)
self._machines_cache.append('%s://%s:%d' % (self._protocol,
_h, _p))
self._base_uri = self._machines_cache.pop(0)
_, self._host, self._port = self._extract_base_uri()
self.version_prefix = version_prefix
self._keys_path = self.version_prefix + '/keys'
self._stats_path = self.version_prefix + '/stats'
self._mem_path = self.version_prefix + '/members'
self._user_path = self.version_prefix + '/auth/users'
self._role_path = self.version_prefix + '/auth/roles'
self._read_timeout = read_timeout
self._allow_redirect = allow_redirect
self._allow_reconnect = allow_reconnect
self.basic_auth_account = basic_auth_account
if self._allow_reconnect:
if len(self._machines_cache) <= 0:
self._machines_cache = self.machines
if self._base_uri in self._machines_cache:
self._machines_cache.remove(self._base_uri)
else:
self._machines_cache = []
@property
def base_uri(self):
return self._base_uri
@property
def host(self):
return self._host
@property
def port(self):
return self._port
@property
def protocol(self):
return self._protocol
@property
def read_timeout(self):
return self._read_timeout
@property
def allow_redirect(self):
return self._allow_redirect
@property
def machines(self):
res = self.api_execute(self.version_prefix + '/machines',
self._MGET,
need_refresh_machines=False)
nodes = res.data.split(',')
return [n.strip() for n in nodes]
@property
def members(self):
res = self.api_execute(self._mem_path, self._MGET)
return utfjson.load(res.data)['members']
@property
def leader(self):
res = self.api_execute(self._stats_path + '/self', self._MGET)
self_st = utfjson.load(res.data)
leader_id = self_st.get('leaderInfo', {}).get('leader')
if leader_id is None:
return None
mems = self.members
for mem in mems:
if mem['id'] != leader_id:
continue
return mem.copy()
@property
def version(self):
res = self.api_execute('/version', self._MGET)
return utfjson.load(res.data)
@property
def st_leader(self):
leader = self.leader
if leader is None:
return None
leaderhosts = []
for url in leader['clientURLs']:
if not url.startswith(self._protocol):
url = self._protocol + '://' + url
p = urlparse.urlparse(url)
if p.hostname == '127.0.0.1':
continue
port = p.port or self.port
leaderhosts.append((p.hostname, port))
return Client(host=leaderhosts)._st('/leader')
@property
def st_self(self):
return self._st('/self')
@property
def st_store(self):
return self._st('/store')
@property
def names(self):
return [n['name'] for n in self.members]
@property
def ids(self):
return [n['id'] for n in self.members]
@property
def clienturls(self):
return sum([n['clientURLs'] for n in self.members], [])
@property
def peerurls(self):
return sum([n['peerURLs'] for n in self.members], [])
def __contains__(self, key):
try:
self.get(key)
return True
except EcodeKeyNotFound:
return False
def _sanitize_key(self, key):
if not key.startswith('/'):
key = "/{key}".format(key=key)
return key
def _extract_base_uri(self):
p = urlparse.urlparse(self._base_uri)
return p.scheme, p.hostname, p.port
def _parse_url(self, url):
p = urlparse.urlparse(url)
if p.scheme == 'https':
raise EtcdSSLError('not supported https right now. ' + url)
elif p.scheme != 'http':
return None, None, url
port = p.port or self.port
return p.hostname, port, p.path
def _generate_params(self, options, argkv):
params = {}
for k, v in argkv.items():
if k not in options:
continue
if isinstance(v, bool):
params[k] = v and "true" or "false"
continue
params[k] = v
return params
def _st(self, st_path):
st_path = self._sanitize_key(st_path)
response = self.api_execute(self._stats_path + st_path, self._MGET)
return self._to_dict(response)
def _to_keysresult(self, response):
try:
res = utfjson.load(response.data)
r = EtcdKeysResult(**res)
r.parse_response(response)
return r
except ValueError as e:
logger.error(repr(e) + ' while decode {data}'.format(
data=response.data))
raise EtcdIncompleteRead('failed to decode %s' % response.data)
except Exception as e:
logger.error(repr(e) + ' while decode {data}'.format(
data=response.data))
raise EtcdResponseError('failed to decode %s' % response.data)
def _to_dict(self, response):
try:
return utfjson.load(response.data)
except ValueError as e:
logger.error(repr(e) + ' while decode {data}'.format(
data=response.data))
raise EtcdIncompleteRead('failed to decode %s' % response.data)
except Exception as e:
logger.error(repr(e) + ' while decode {data}'.format(
data=response.data))
raise EtcdResponseError('failed to decode %s' % response.data)
def _handle_server_response(self, response):
if response.status in (httplib.OK, httplib.CREATED,
httplib.NO_CONTENT):
return response
logger.debug('invalid response status:{st} body:{body}'.format(
st=response.status, body=response.data))
EtcdError.handle(response)
def _request(self, url, method, params, timeout, bodyinjson):
while True:
host, port, path = self._parse_url(url)
if host is None or port is None or path is None:
raise EtcdException('url is invalid, {url}'.format(url=url))
qs = {}
headers = {}
body = ''
if method in (self._MGET, self._MDELETE):
qs.update(params or {})
                # consume params here: on redirect they are already encoded
                # into the location's query string
params = None
headers['Content-Length'] = 0
elif method in (self._MPUT, self._MPOST):
if bodyinjson:
if params is not None:
body = utfjson.dump(params)
headers.update({'Content-Type': 'application/json',
'Content-Length': len(body)})
else:
body = urllib.urlencode(params or {})
headers.update(
{'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': len(body)}
)
else:
raise EtcdRequestError('HTTP method {method} not supported'
''.format(method=method))
if len(qs) > 0:
if '?' in path:
path = path + '&' + urllib.urlencode(qs)
else:
path = path + '?' + urllib.urlencode(qs)
if self.basic_auth_account is not None:
auth = {
'Authorization': 'Basic {ant}'.format(
ant=self.basic_auth_account.encode('base64').strip()),
}
headers.update(auth)
logger.debug('connect -> {mtd} {url}{path} {timeout}'.format(
mtd=method,
url=self._base_uri,
path=path,
timeout=timeout))
h = http.Client(host, port, timeout)
h.send_request(path, method, headers)
h.send_body(body)
h.read_response()
resp = Response.from_http(h)
if not self.allow_redirect:
return resp
if resp.status not in Response.REDIRECT_STATUSES:
return resp
url = resp.get_redirect_location()
if url is None:
raise EtcdResponseError('location not found in {header}'
''.format(header=resp.headers))
logger.debug('redirect -> ' + url)
def _api_execute_with_retry(self,
path,
method,
params=None,
timeout=None,
bodyinjson=False,
raise_read_timeout=False,
**request_kw):
# including _base_uri, there are len(_machines_cache) + 1 hosts to try
# to connect to.
for i in range(len(self._machines_cache) + 1):
url = self._base_uri + path
try:
response = self._request(url, method, params,
timeout, bodyinjson)
break
except (socket.error,
http.HttpError) as e:
if raise_read_timeout and isinstance(e, socket.timeout):
raise EtcdReadTimeoutError(e)
if len(self._machines_cache) > 0:
self._machines_cache.append(self._base_uri)
self._base_uri = self._machines_cache.pop(0)
self._protocol, self._host, self._port = self._extract_base_uri()
logger.info('{err} while connect {cur}, try connect {nxt}'
.format(err=repr(e), cur=url,
nxt=self._base_uri))
else:
logger.info('no more host to retry')
except Exception as e:
logger.exception(repr(e) + ' while send request to etcd')
raise EtcdException(e)
else:
raise NoMoreMachineError('No more machines in the cluster')
return self._handle_server_response(response)
def api_execute(self,
path,
method,
params=None,
timeout=None,
bodyinjson=False,
raise_read_timeout=False,
need_refresh_machines=True,
**request_kw):
if timeout is None:
timeout = self.read_timeout
if timeout == 0:
timeout = None
if not path.startswith('/'):
raise ValueError('Path does not start with /')
for i in range(0, 2):
try:
return self._api_execute_with_retry(
path,
method,
params=params,
timeout=timeout,
bodyinjson=bodyinjson,
raise_read_timeout=raise_read_timeout,
**request_kw)
except NoMoreMachineError as e:
logger.info(repr(e) + ' while send_request path:{path}, '
'method:{mtd}'.format(path=path, mtd=method))
if i == 1 or not need_refresh_machines or not self._allow_reconnect:
raise
new_machines = self.machines
old_machines = self._machines_cache + [self._base_uri]
if set(new_machines) == set(old_machines):
raise
self._machines_cache = new_machines
self._base_uri = self._machines_cache.pop(0)
self._protocol, self._host, self._port = self._extract_base_uri()
def read(self, key, **argkv):
key = self._sanitize_key(key)
params = self._generate_params(self._read_options, argkv)
timeout = argkv.get('timeout')
response = self.api_execute(self._keys_path + key, self._MGET,
params=params, timeout=timeout)
return self._to_keysresult(response)
get = read
def write(self, key, value=None, ttl=None,
dir=False, append=False, refresh=False, **argkv):
key = self._sanitize_key(key)
params = {}
if ttl is not None:
params['ttl'] = ttl
if dir and value is not None:
raise EtcdRequestError(
'Cannot create a directory with a value ' + repr(value))
elif value is not None:
params['value'] = value
elif dir:
params['dir'] = "true"
if refresh:
params['refresh'] = "true"
params.update(self._generate_params(self._write_conditions, argkv))
method = append and self._MPOST or self._MPUT
if '_endpoint' in argkv:
path = argkv['_endpoint'] + key
else:
path = self._keys_path + key
response = self.api_execute(path, method, params=params)
return self._to_keysresult(response)
def test_and_set(self, key, value, ttl=None, **argkv):
return self.write(key, value=value, ttl=ttl, **argkv)
def set(self, key, value, ttl=None):
return self.write(key, value=value, ttl=ttl)
def update(self, res):
argkv = {
'dir': res.dir,
'ttl': res.ttl,
'prevExist': True,
}
if not res.dir:
            # setting prevIndex on a directory causes a 'not a file' error
argkv['prevIndex'] = res.modifiedIndex
return self.write(res.key, value=res.value, **argkv)
def delete(self, key, recursive=None, dir=None, **argkv):
key = self._sanitize_key(key)
params = {}
if recursive is not None:
params['recursive'] = recursive and "true" or "false"
if dir is not None:
params['dir'] = dir and "true" or "false"
params.update(self._generate_params(self._del_conditions, argkv))
response = self.api_execute(self._keys_path + key, self._MDELETE,
params=params)
return self._to_keysresult(response)
def test_and_delete(self, key, **argkv):
return self.delete(key, **argkv)
def watch(self, key, waitindex=None, timeout=None, **argkv):
newest_v = self.get(key)
if waitindex is None:
waitindex = argkv.get('waitIndex')
if waitindex is not None and 0 < waitindex <= newest_v.modifiedIndex:
return newest_v
else:
waitindex = newest_v.etcd_index + 1
return self._watch(key, waitindex, timeout, **argkv)
def _watch(self, key, waitindex=None, timeout=None, **argkv):
key = self._sanitize_key(key)
params = self._generate_params(self._read_options, argkv)
params['wait'] = 'true'
if waitindex is not None:
params['waitIndex'] = waitindex
        # a timeout of 0 means wait indefinitely
        while timeout == 0:
try:
response = self.api_execute(self._keys_path + key,
self._MGET, params=params,
timeout=timeout)
return self._to_keysresult(response)
except EtcdIncompleteRead:
pass
timeout = timeout or self.read_timeout
while True:
st = time.time()
try:
response = self.api_execute(self._keys_path + key, self._MGET,
params=params, timeout=timeout,
raise_read_timeout=True)
return self._to_keysresult(response)
except (EtcdIncompleteRead, EtcdReadTimeoutError):
timeout = timeout - (time.time() - st)
if timeout <= 0:
raise EtcdReadTimeoutError('Watch Timeout: ' + key)
def eternal_watch(self, key, waitindex=None, until=None, **argkv):
local_index = waitindex
while True:
res = self._watch(key, waitindex=local_index, timeout=0, **argkv)
if until is not None and res.modifiedIndex is not None:
if res.modifiedIndex >= until:
yield res
return
if local_index is not None:
local_index = (res.modifiedIndex or local_index) + 1
yield res
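    # Usage sketch (illustrative, not part of the original client): watch()
    # returns once per change, while eternal_watch() yields changes forever
    # (or until 'until'). The key and endpoint below are hypothetical.
    #
    #     c = Client(host='127.0.0.1', port=2379)
    #     res = c.watch('/mykey', timeout=5)
    #     for res in c.eternal_watch('/mykey'):
    #         print(res.key, res.value)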
def mkdir(self, key, ttl=None, **argkv):
return self.write(key, ttl=ttl, dir=True, **argkv)
def refresh(self, key, ttl=None, **argkv):
argkv['prevExist'] = True
return self.write(key, ttl=ttl, refresh=True, **argkv)
def lsdir(self, key, **argkv):
return self.read(key, **argkv)
def rlsdir(self, key, **argkv):
argkv['recursive'] = True
return self.read(key, **argkv)
def deldir(self, key, **argkv):
return self.delete(key, dir=True, **argkv)
def rdeldir(self, key, **argkv):
argkv['recursive'] = True
return self.delete(key, dir=True, **argkv)
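    # Usage sketch (illustrative): basic key/value and directory operations
    # map onto the methods above; paths and values are hypothetical.
    #
    #     c = Client(host='127.0.0.1', port=2379)
    #     c.set('/conf/name', 'foo', ttl=60)
    #     print(c.get('/conf/name').value)      # 'foo'
    #     c.mkdir('/conf/sub')
    #     c.rdeldir('/conf')                    # recursive directory delete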
def add_member(self, *peerurls):
if len(peerurls) == 0:
raise EtcdException('no peer url found')
data = {'peerURLs': peerurls}
response = self.api_execute(self._mem_path, self._MPOST,
params=data, bodyinjson=True)
return self._to_dict(response)
def del_member(self, mid):
if mid not in self.ids:
logger.info('{mid} not in the cluster when delete member'.format(
mid=mid))
return
mid = self._sanitize_key(mid)
self.api_execute(self._mem_path + mid, self._MDELETE)
def change_peerurls(self, mid, *peerurls):
if mid not in self.ids:
logger.info('{mid} not in the cluster when change peerurls'.format(
mid=mid))
return
if len(peerurls) == 0:
raise EtcdException('no peer url found')
mid = self._sanitize_key(mid)
data = {'peerURLs': peerurls}
self.api_execute(self._mem_path + mid, self._MPUT,
params=data, bodyinjson=True)
def _root_auth(self, password):
return 'root:%s' % (password)
def create_root(self, password):
path = self._user_path + '/root'
params = {'user': 'root', 'password': password}
res = self.api_execute(path, self._MPUT,
params=params, bodyinjson=True)
return self._to_dict(res)
def enable_auth(self, root_password):
self.basic_auth_account = self._root_auth(root_password)
self.api_execute('/v2/auth/enable', self._MPUT)
def disable_auth(self, root_password):
self.basic_auth_account = self._root_auth(root_password)
self.api_execute('/v2/auth/enable', self._MDELETE)
def create_user(self, name, password, root_password, roles=None):
self.basic_auth_account = self._root_auth(root_password)
path = self._user_path + self._sanitize_key(name)
if roles is not None:
params = {"user": name, "password": password, "roles": roles}
else:
params = {"user": name, "password": password}
res = self.api_execute(
path, self._MPUT, params=params, bodyinjson=True)
return self._to_dict(res)
def create_role(self, name, root_password, permissions=None):
self.basic_auth_account = self._root_auth(root_password)
path = self._role_path + self._sanitize_key(name)
if permissions is not None:
params = {"role": name, "permissions": {"kv": permissions}}
else:
params = {"role": name}
res = self.api_execute(path, self._MPUT,
params=params, bodyinjson=True)
return self._to_dict(res)
def get_user(self, name, root_password):
self.basic_auth_account = self._root_auth(root_password)
if name is not None:
path = self._user_path + self._sanitize_key(name)
else:
path = self._user_path
res = self.api_execute(path, self._MGET)
return self._to_dict(res)
def get_role(self, name, root_password):
self.basic_auth_account = self._root_auth(root_password)
if name is not None:
path = self._role_path + self._sanitize_key(name)
else:
path = self._role_path
res = self.api_execute(path, self._MGET)
return self._to_dict(res)
def grant_user_roles(self, name, root_password, roles):
self.basic_auth_account = self._root_auth(root_password)
path = self._user_path + self._sanitize_key(name)
params = {'user': name, 'grant': roles}
res = self.api_execute(path, self._MPUT,
params=params, bodyinjson=True)
return self._to_dict(res)
def revoke_user_roles(self, name, root_password, roles):
self.basic_auth_account = self._root_auth(root_password)
path = self._user_path + self._sanitize_key(name)
params = {'user': name, 'revoke': roles}
res = self.api_execute(path, self._MPUT,
params=params, bodyinjson=True)
return self._to_dict(res)
def grant_role_permissions(self, name, root_password, permissions):
self.basic_auth_account = self._root_auth(root_password)
path = self._role_path + self._sanitize_key(name)
params = {"role": name, "grant": {"kv": permissions}}
res = self.api_execute(path, self._MPUT,
params=params, bodyinjson=True)
return self._to_dict(res)
def revoke_role_permissions(self, name, root_password, permissions):
self.basic_auth_account = self._root_auth(root_password)
path = self._role_path + self._sanitize_key(name)
params = {"role": name, "revoke": {"kv": permissions}}
res = self.api_execute(path, self._MPUT,
params=params, bodyinjson=True)
return self._to_dict(res)
def delete_user(self, user_name, root_password):
self.basic_auth_account = self._root_auth(root_password)
path = self._user_path + self._sanitize_key(user_name)
self.api_execute(path, self._MDELETE)
def delete_role(self, role_name, root_password):
self.basic_auth_account = self._root_auth(root_password)
path = self._role_path + self._sanitize_key(role_name)
self.api_execute(path, self._MDELETE)
|
altsen/diandiyun-platform
|
refs/heads/master
|
common/djangoapps/dark_lang/models.py
|
6
|
"""
Models for the dark-launching languages
"""
from django.db import models
from config_models.models import ConfigurationModel
class DarkLangConfig(ConfigurationModel):
"""
Configuration for the dark_lang django app
"""
released_languages = models.TextField(
blank=True,
help_text="A comma-separated list of language codes to release to the public."
)
@property
def released_languages_list(self):
"""
``released_languages`` as a list of language codes.
Example: ['it', 'de-at', 'es', 'pt-br']
"""
if not self.released_languages.strip(): # pylint: disable=no-member
return []
return [lang.strip() for lang in self.released_languages.split(',')] # pylint: disable=no-member
|
dparshin/phantomjs
|
refs/heads/master
|
src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/reflection.py
|
260
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is meant to work on Python 2.4 and above only.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import api_implementation
from google.protobuf import descriptor as descriptor_mod
_FieldDescriptor = descriptor_mod.FieldDescriptor
if api_implementation.Type() == 'cpp':
from google.protobuf.internal import cpp_message
_NewMessage = cpp_message.NewMessage
_InitMessage = cpp_message.InitMessage
else:
from google.protobuf.internal import python_message
_NewMessage = python_message.NewMessage
_InitMessage = python_message.InitMessage
class GeneratedProtocolMessageType(type):
"""Metaclass for protocol message classes created at runtime from Descriptors.
We add implementations for all methods described in the Message class. We
also create properties to allow getting/setting all fields in the protocol
message. Finally, we create slots to prevent users from accidentally
"setting" nonexistent fields in the protocol message, which then wouldn't get
serialized / deserialized properly.
The protocol compiler currently uses this metaclass to create protocol
message classes at runtime. Clients can also manually create their own
classes at runtime, as in this example:
mydescriptor = Descriptor(.....)
class MyProtoClass(Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = mydescriptor
myproto_instance = MyProtoClass()
    myproto_instance.foo_field = 23
...
"""
# Must be consistent with the protocol-compiler code in
# proto2/compiler/internal/generator.*.
_DESCRIPTOR_KEY = 'DESCRIPTOR'
def __new__(cls, name, bases, dictionary):
"""Custom allocation for runtime-generated class types.
We override __new__ because this is apparently the only place
    where we can meaningfully set __slots__ on the class we're creating.
    (The interplay between metaclasses and slots is not very well documented.)
Args:
name: Name of the class (ignored, but required by the
metaclass protocol).
bases: Base classes of the class we're constructing.
(Should be message.Message). We ignore this field, but
it's required by the metaclass protocol
dictionary: The class dictionary of the class we're
constructing. dictionary[_DESCRIPTOR_KEY] must contain
a Descriptor object describing this protocol message
type.
Returns:
Newly-allocated class.
"""
descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
_NewMessage(descriptor, dictionary)
superclass = super(GeneratedProtocolMessageType, cls)
new_class = superclass.__new__(cls, name, bases, dictionary)
setattr(descriptor, '_concrete_class', new_class)
return new_class
def __init__(cls, name, bases, dictionary):
"""Here we perform the majority of our work on the class.
We add enum getters, an __init__ method, implementations
of all Message methods, and properties for all fields
in the protocol type.
Args:
name: Name of the class (ignored, but required by the
metaclass protocol).
bases: Base classes of the class we're constructing.
(Should be message.Message). We ignore this field, but
it's required by the metaclass protocol
dictionary: The class dictionary of the class we're
constructing. dictionary[_DESCRIPTOR_KEY] must contain
a Descriptor object describing this protocol message
type.
"""
descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
_InitMessage(descriptor, cls)
superclass = super(GeneratedProtocolMessageType, cls)
superclass.__init__(name, bases, dictionary)
|
Maccimo/intellij-community
|
refs/heads/master
|
python/helpers/pycharm/django_test_runner.py
|
23
|
from tcunittest import TeamcityTestRunner, TeamcityTestResult
from tcmessages import TeamcityServiceMessages
import sys
from pycharm_run_utils import adjust_django_sys_path
from django.test.utils import get_runner
adjust_django_sys_path()
from django.conf import settings
def is_nosetest(settings):
"""
    Checks if Django is configured to work with nosetest.
    :param settings: django settings
    :return: True if Django should work with the NoseTest runner or one of its subclasses
"""
try:
runner = get_runner(settings)
from django_nose import NoseTestSuiteRunner
if issubclass(runner, NoseTestSuiteRunner):
return True
except (AttributeError, ImportError):
pass
return False
from django.test.testcases import TestCase
from django import VERSION
if is_nosetest(settings):
from nose_utils import TeamcityNoseRunner
# See: https://docs.djangoproject.com/en/1.8/releases/1.7/#django-utils-unittest
# django.utils.unittest provided uniform access to the unittest2 library on all Python versions.
# Since unittest2 became the standard library's unittest module in Python 2.7,
# and Django 1.7 drops support for older Python versions, this module isn't useful anymore.
# It has been deprecated. Use unittest instead.
if VERSION >= (1,7):
import unittest
else:
from django.utils import unittest
def get_test_suite_runner():
if hasattr(settings, "TEST_RUNNER"):
from django.test.utils import get_runner
class TempSettings:
TEST_RUNNER = settings.TEST_RUNNER
return get_runner(TempSettings)
try:
if VERSION >= (1,6):
from django.test.runner import DiscoverRunner as DjangoSuiteRunner
else:
from django.test.simple import DjangoTestSuiteRunner as DjangoSuiteRunner
from inspect import isfunction
SUITE_RUNNER = get_test_suite_runner()
if isfunction(SUITE_RUNNER):
import sys
sys.stderr.write(
"WARNING: TEST_RUNNER variable is ignored. PyCharm test runner supports "
"only class-like TEST_RUNNER valiables. Use Tools->run manage.py tasks.\n")
SUITE_RUNNER = None
BaseSuiteRunner = SUITE_RUNNER or DjangoSuiteRunner
class BaseRunner(TeamcityTestRunner, BaseSuiteRunner):
def __init__(self, stream=sys.stdout, **options):
TeamcityTestRunner.__init__(self, stream)
BaseSuiteRunner.__init__(self, **options)
except ImportError:
# for Django <= 1.1 compatibility
class BaseRunner(TeamcityTestRunner):
def __init__(self, stream=sys.stdout, **options):
TeamcityTestRunner.__init__(self, stream)
def strclass(cls):
if not cls.__name__:
return cls.__module__
return "%s.%s" % (cls.__module__, cls.__name__)
class DjangoTeamcityTestResult(TeamcityTestResult):
def __init__(self, *args, **kwargs):
super(DjangoTeamcityTestResult, self).__init__(**kwargs)
def _getSuite(self, test):
if hasattr(test, "suite"):
suite = strclass(test.suite)
suite_location = test.suite.location
location = test.suite.abs_location
if hasattr(test, "lineno"):
location = location + ":" + str(test.lineno)
else:
location = location + ":" + str(test.test.lineno)
else:
suite = strclass(test.__class__)
suite_location = "django_testid://" + suite
location = "django_testid://" + str(test.id())
return (suite, location, suite_location)
class DjangoTeamcityTestRunner(BaseRunner):
def __init__(self, stream=sys.stdout, **options):
super(DjangoTeamcityTestRunner, self).__init__(stream, **options)
self.options = options
def _makeResult(self, **kwargs):
return DjangoTeamcityTestResult(self.stream, **kwargs)
def build_suite(self, *args, **kwargs):
EXCLUDED_APPS = getattr(settings, 'TEST_EXCLUDE', [])
suite = super(DjangoTeamcityTestRunner, self).build_suite(*args, **kwargs)
if not args[0] and not getattr(settings, 'RUN_ALL_TESTS', False):
tests = []
for case in suite:
pkg = case.__class__.__module__.split('.')[0]
if pkg not in EXCLUDED_APPS:
tests.append(case)
suite._tests = tests
return suite
def run_suite(self, suite, **kwargs):
if is_nosetest(settings):
from django_nose.plugin import DjangoSetUpPlugin, ResultPlugin
from django_nose.runner import _get_plugins_from_settings
from nose.config import Config
import nose
result_plugin = ResultPlugin()
plugins_to_add = [DjangoSetUpPlugin(self), result_plugin]
config = Config(plugins=nose.core.DefaultPluginManager())
config.plugins.addPlugins(extraplugins=plugins_to_add)
for plugin in _get_plugins_from_settings():
plugins_to_add.append(plugin)
nose.core.TestProgram(argv=suite, exit=False, addplugins=plugins_to_add,
testRunner=TeamcityNoseRunner(config=config))
return result_plugin.result
else:
self.options.update(kwargs)
return TeamcityTestRunner.run(self, suite, **self.options)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
if is_nosetest(settings):
return super(DjangoTeamcityTestRunner, self).run_tests(test_labels, extra_tests)
return super(DjangoTeamcityTestRunner, self).run_tests(test_labels, extra_tests, **kwargs)
def partition_suite(suite, classes, bins):
"""
Partitions a test suite by test type.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
"""
for test in suite:
if isinstance(test, unittest.TestSuite):
partition_suite(test, classes, bins)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].addTest(test)
break
else:
bins[-1].addTest(test)
def reorder_suite(suite, classes):
"""
Reorders a test suite by test type.
classes is a sequence of types
    All tests of type classes[0] are placed first, then tests of type classes[1], etc.
Tests with no match in classes are placed last.
"""
class_count = len(classes)
bins = [unittest.TestSuite() for i in range(class_count + 1)]
partition_suite(suite, classes, bins)
for i in range(class_count):
bins[0].addTests(bins[i + 1])
return bins[0]
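# Example (hedged sketch of the helpers above): given classes=(TestCase,),
# reorder_suite() puts all TestCase-derived tests first and everything else
# (e.g. doctests) after them, preserving relative order within each bin:
#
#     suite = reorder_suite(suite, (TestCase,))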
def run_the_old_way(extra_tests, kwargs, test_labels, verbosity):
from django.test.simple import build_suite, build_test, get_app, get_apps, \
setup_test_environment, teardown_test_environment
setup_test_environment()
settings.DEBUG = False
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
for test in extra_tests:
suite.addTest(test)
suite = reorder_suite(suite, (TestCase,))
old_name = settings.DATABASE_NAME
from django.db import connection
connection.creation.create_test_db(verbosity, autoclobber=False)
result = DjangoTeamcityTestRunner().run(suite, **kwargs)
connection.creation.destroy_test_db(old_name, verbosity)
teardown_test_environment()
return len(result.failures) + len(result.errors)
def run_tests(test_labels, verbosity=1, interactive=False, extra_tests=[],
**kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
- app.TestClass.test_method
Run a single specific test method
- app.TestClass
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
options = {
'verbosity': verbosity,
'interactive': interactive
}
options.update(kwargs)
TeamcityServiceMessages(sys.stdout).testMatrixEntered()
return DjangoTeamcityTestRunner(**options).run_tests(test_labels,
extra_tests=extra_tests, **options)
|
bdero/edx-platform
|
refs/heads/master
|
cms/lib/xblock/__init__.py
|
12133432
| |
rodrigobraga/informer
|
refs/heads/master
|
informer/migrations/__init__.py
|
12133432
| |
chaffra/sympy
|
refs/heads/master
|
sympy/logic/tests/__init__.py
|
12133432
| |
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/installation/mining_organic/__init__.py
|
12133432
| |
openstack/ceilometer
|
refs/heads/master
|
ceilometer/meter/__init__.py
|
12133432
| |
willthames/ansible
|
refs/heads/devel
|
test/units/modules/cloud/amazon/test_cloudformation.py
|
89
|
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import pytest
from mock import patch
from .placebo_fixtures import placeboify, maybe_sleep
from ansible.modules.cloud.amazon import cloudformation as cfn_module
basic_yaml_tpl = """
---
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Basic template that creates an S3 bucket'
Resources:
MyBucket:
Type: "AWS::S3::Bucket"
Outputs:
TheName:
Value:
!Ref MyBucket
"""
bad_json_tpl = """{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Broken template, no comma here ->"
"Resources": {
"MyBucket": {
"Type": "AWS::S3::Bucket"
}
}
}"""
class FakeModule(object):
def __init__(self, **kwargs):
self.params = kwargs
def fail_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
raise Exception('FAIL')
def exit_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
raise Exception('EXIT')
def test_invalid_template_json(placeboify):
connection = placeboify.client('cloudformation')
params = {
'StackName': 'ansible-test-wrong-json',
'TemplateBody': bad_json_tpl,
}
m = FakeModule(disable_rollback=False)
with pytest.raises(Exception, message='Malformed JSON should cause the test to fail') as exc_info:
cfn_module.create_stack(m, params, connection)
assert exc_info.match('FAIL')
assert "ValidationError" in m.exit_kwargs['msg']
def test_basic_s3_stack(maybe_sleep, placeboify):
connection = placeboify.client('cloudformation')
params = {
'StackName': 'ansible-test-basic-yaml',
'TemplateBody': basic_yaml_tpl,
}
m = FakeModule(disable_rollback=False)
result = cfn_module.create_stack(m, params, connection)
assert result['changed']
assert len(result['events']) > 1
# require that the final recorded stack state was CREATE_COMPLETE
# events are retrieved newest-first, so 0 is the latest
assert 'CREATE_COMPLETE' in result['events'][0]
connection.delete_stack(StackName='ansible-test-basic-yaml')
def test_delete_nonexistent_stack(maybe_sleep, placeboify):
connection = placeboify.client('cloudformation')
result = cfn_module.stack_operation(connection, 'ansible-test-nonexist', 'DELETE')
assert result['changed']
assert 'Stack does not exist.' in result['log']
def test_get_nonexistent_stack(placeboify):
connection = placeboify.client('cloudformation')
assert cfn_module.get_stack_facts(connection, 'ansible-test-nonexist') is None
def test_missing_template_body(placeboify):
m = FakeModule()
with pytest.raises(Exception, message='Expected module to fail with no template') as exc_info:
cfn_module.create_stack(
module=m,
stack_params={},
cfn=None
)
assert exc_info.match('FAIL')
assert not m.exit_args
assert "Either 'template' or 'template_url' is required when the stack does not exist." == m.exit_kwargs['msg']
|
ovpiNU/ros-dd-extraction-tool
|
refs/heads/master
|
driving_data/src/driving_data_tools/scripts/rosbag_data_extract.py
|
1
|
__author__ = 'patty'
import sys
import os
import rospy
import numpy as np
import cv2
import pcl
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import Image, PointCloud2
from cv_bridge import CvBridge
from can_msg.msg import CANPacket
from nmea_msgs.msg import Sentence
save_path = None
camera_img_0 = None
camera_img_1 = None
camera_img_2 = None
cloud = None
nmea = None
can = None
camera_img_0_ready = False
camera_img_1_ready = False
camera_img_2_ready = False
cloud_ready = False
can_ready = False
nmea_ready = False
def img_loader_0(image_msg):
global camera_img_0
global camera_img_0_ready
camera_img_0_ready = False
bridge = CvBridge()
camera_img_0 = bridge.imgmsg_to_cv2(image_msg, "bgr8")
camera_img_0_ready = True
def img_loader_1(image_msg):
global camera_img_1
global camera_img_1_ready
camera_img_1_ready = False
bridge = CvBridge()
camera_img_1 = bridge.imgmsg_to_cv2(image_msg, "bgr8")
camera_img_1_ready = True
def img_loader_2(image_msg):
global camera_img_2
global camera_img_2_ready
camera_img_2_ready = False
bridge = CvBridge()
camera_img_2 = bridge.imgmsg_to_cv2(image_msg, "bgr8")
camera_img_2_ready = True
def can_loader(msg):
global can
global can_ready
can_ready = False
can = msg
can_ready = True
def nmea_loader(msg):
global nmea
global nmea_ready
nmea = msg.sentence
nmea_ready = True
def cloud_loader(msg):
global cloud
global cloud_ready
cloud_ready = False
cloud = msg
cloud_ready = True
save_data()
reset_flags()
def save_pcd(cloud, timestamp, path):
p = pcl.PointCloud(np.array(list(pc2.read_points(cloud)), dtype=np.float32)[:, 0:3])
p.to_file(path + '/pcd' + '/pcd' + '_' + "{:.5f}".format(timestamp) + '.pcd')
def save_image(img, timestamp, path, sfx):
cv2.imwrite(path + '/camera' + sfx + '/camera_' + sfx + '_' + "{:.5f}".format(timestamp) + '.png', img)
def save_can(can, timestamp, path):
f = open(path+'/can'+'/can.bin', 'ab')
f.write("{:.5f}".format(timestamp)+','+str(can.id)+','+str(can.dat))
f.close()
def save_nmea(nmea, timestamp, path):
f = open(path + '/nmea' + '/nmea.csv', 'a')
f.write("{:.5f}".format(timestamp) + ',' + nmea)
f.write('\n')
f.close()
def save_data():
if cloud_ready and camera_img_0_ready and camera_img_1_ready and camera_img_2_ready and can_ready and nmea_ready:
this_cloud = cloud
this_camera_img_0 = camera_img_0.copy()
this_camera_img_1 = camera_img_1.copy()
this_camera_img_2 = camera_img_2.copy()
this_can = can
this_nmea = nmea
timestamp = cloud.header.stamp.secs + ((cloud.header.stamp.nsecs + 0.0) / 1000000000)
save_pcd(this_cloud, timestamp, save_path)
save_image(this_camera_img_0, timestamp, save_path, '0')
save_image(this_camera_img_1, timestamp, save_path, '1')
save_image(this_camera_img_2, timestamp, save_path, '2')
save_can(this_can, timestamp, save_path)
save_nmea(this_nmea, timestamp, save_path)
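# Note (added for clarity): the timestamp above folds the cloud header into a
# float of seconds, e.g. secs=1500000000, nsecs=250000000 -> 1500000000.25,
# which the "{:.5f}" format in the save_* helpers renders as 1500000000.25000.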
def reset_flags():
globals()['camera_img_0_ready'] = False
globals()['camera_img_1_ready'] = False
globals()['camera_img_2_ready'] = False
globals()['cloud_ready'] = False
# globals()['can_ready'] = False
# globals()['nmea_ready'] = False
def rosbag_data_extract():
global save_path
try:
save_path = sys.argv[1]
    except IndexError:
sys.exit("Please specify the save path. Example: rosbag_data_extract.py /media/0/output/")
if not os.path.exists(save_path):
os.makedirs(save_path)
if not os.path.exists(save_path + '/camera0'):
os.makedirs(save_path + '/camera0')
if not os.path.exists(save_path + '/camera1'):
os.makedirs(save_path + '/camera1')
if not os.path.exists(save_path + '/camera2'):
os.makedirs(save_path + '/camera2')
if not os.path.exists(save_path + '/can'):
os.makedirs(save_path + '/can')
if not os.path.exists(save_path + '/pcd'):
os.makedirs(save_path + '/pcd')
if not os.path.exists(save_path + '/nmea'):
os.makedirs(save_path + '/nmea')
rospy.init_node('rosbag_data_extract', anonymous=True)
rospy.Subscriber("/camera0/image_raw", Image, img_loader_0)
rospy.Subscriber("/camera1/image_raw", Image, img_loader_1)
rospy.Subscriber("/camera2/image_raw", Image, img_loader_2)
rospy.Subscriber("/points_raw", PointCloud2, cloud_loader)
rospy.Subscriber("/can_raw", CANPacket, can_loader)
rospy.Subscriber("/nmea_sentence", Sentence, nmea_loader)
rospy.spin()
if __name__ == '__main__':
rosbag_data_extract()
|
cloudbase/neutron-virtualbox
|
refs/heads/virtualbox_agent
|
neutron/plugins/ofagent/agent/arp_lib.py
|
9
|
# Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ryu.app.ofctl import api as ryu_api
from ryu.lib import dpid as dpid_lib
from ryu.lib.packet import arp
from ryu.lib.packet import ethernet
from ryu.lib.packet import packet
from ryu.lib.packet import vlan
from neutron.common import log
from neutron.i18n import _LI
from neutron.openstack.common import log as logging
import neutron.plugins.ofagent.agent.metadata as meta
LOG = logging.getLogger(__name__)
class ArpLib(object):
def __init__(self, ryuapp):
"""Constructor.
        Define the internal table that maps an IP address to a MAC address
        per network.
self._arp_tbl:
{network1: {ip_addr: mac, ...},
network2: {ip_addr: mac, ...},
...,
}
:param ryuapp: object of the ryu app.
"""
self.ryuapp = ryuapp
self._arp_tbl = {}
self.br = None
def set_bridge(self, br):
self.br = br
@log.log
def _send_arp_reply(self, datapath, port, pkt):
ofp = datapath.ofproto
ofpp = datapath.ofproto_parser
pkt.serialize()
data = pkt.data
actions = [ofpp.OFPActionOutput(port=port)]
out = ofpp.OFPPacketOut(datapath=datapath,
buffer_id=ofp.OFP_NO_BUFFER,
in_port=ofp.OFPP_CONTROLLER,
actions=actions,
data=data)
ryu_api.send_msg(self.ryuapp, out)
@log.log
def _send_unknown_packet(self, msg, in_port, out_port):
datapath = msg.datapath
ofp = datapath.ofproto
ofpp = datapath.ofproto_parser
data = None
if msg.buffer_id == ofp.OFP_NO_BUFFER:
data = msg.data
actions = [ofpp.OFPActionOutput(port=out_port)]
out = ofpp.OFPPacketOut(datapath=datapath,
buffer_id=msg.buffer_id,
in_port=in_port,
actions=actions,
data=data)
ryu_api.send_msg(self.ryuapp, out)
def _respond_arp(self, datapath, port, arptbl,
pkt_ethernet, pkt_vlan, pkt_arp):
if pkt_arp.opcode != arp.ARP_REQUEST:
LOG.debug("unknown arp op %s", pkt_arp.opcode)
return False
ip_addr = pkt_arp.dst_ip
hw_addr = arptbl.get(ip_addr)
if hw_addr is None:
LOG.debug("unknown arp request %s", ip_addr)
return False
LOG.debug("responding arp request %(ip_addr)s -> %(hw_addr)s",
{'ip_addr': ip_addr, 'hw_addr': hw_addr})
pkt = packet.Packet()
pkt.add_protocol(ethernet.ethernet(ethertype=pkt_ethernet.ethertype,
dst=pkt_ethernet.src,
src=hw_addr))
if pkt_vlan:
pkt.add_protocol(vlan.vlan(cfi=pkt_vlan.cfi,
ethertype=pkt_vlan.ethertype,
pcp=pkt_vlan.pcp,
vid=pkt_vlan.vid))
pkt.add_protocol(arp.arp(opcode=arp.ARP_REPLY,
src_mac=hw_addr,
src_ip=ip_addr,
dst_mac=pkt_arp.src_mac,
dst_ip=pkt_arp.src_ip))
self._send_arp_reply(datapath, port, pkt)
return True
@log.log
def add_arp_table_entry(self, network, ip, mac):
if network in self._arp_tbl:
self._arp_tbl[network][ip] = mac
else:
self._arp_tbl[network] = {ip: mac}
@log.log
def del_arp_table_entry(self, network, ip):
if network not in self._arp_tbl:
LOG.debug("removal of unknown network %s", network)
return
if self._arp_tbl[network].pop(ip, None) is None:
LOG.debug("removal of unknown ip %s", ip)
return
if not self._arp_tbl[network]:
del self._arp_tbl[network]
def packet_in_handler(self, ev):
"""Check a packet-in message.
Build and output an arp reply if a packet-in message is
an arp packet.
"""
msg = ev.msg
LOG.debug("packet-in msg %s", msg)
datapath = msg.datapath
if self.br is None:
LOG.info(_LI("No bridge is set"))
return
if self.br.datapath.id != datapath.id:
LOG.info(_LI("Unknown bridge %(dpid)s ours %(ours)s"),
{"dpid": datapath.id, "ours": self.br.datapath.id})
return
ofp = datapath.ofproto
port = msg.match['in_port']
metadata = msg.match.get('metadata')
# NOTE(yamamoto): Ryu packet library can raise various exceptions
# on a corrupted packet.
try:
pkt = packet.Packet(msg.data)
except Exception as e:
LOG.debug("Unparsable packet: got exception %s", e)
return
LOG.debug("packet-in dpid %(dpid)s in_port %(port)s pkt %(pkt)s",
{'dpid': dpid_lib.dpid_to_str(datapath.id),
'port': port, 'pkt': pkt})
if metadata is None:
LOG.info(_LI("drop non tenant packet"))
return
network = metadata & meta.NETWORK_MASK
pkt_ethernet = pkt.get_protocol(ethernet.ethernet)
if not pkt_ethernet:
LOG.debug("drop non-ethernet packet")
return
pkt_vlan = pkt.get_protocol(vlan.vlan)
pkt_arp = pkt.get_protocol(arp.arp)
if not pkt_arp:
LOG.debug("drop non-arp packet")
return
arptbl = self._arp_tbl.get(network)
if arptbl:
if self._respond_arp(datapath, port, arptbl,
pkt_ethernet, pkt_vlan, pkt_arp):
return
else:
LOG.info(_LI("unknown network %s"), network)
# add a flow to skip a packet-in to a controller.
self.br.arp_passthrough(network=network, tpa=pkt_arp.dst_ip)
# send an unknown arp packet to the table.
self._send_unknown_packet(msg, port, ofp.OFPP_TABLE)
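# Minimal usage sketch (illustrative; `app` is assumed to be the running ryu
# application and `bridge` the agent's OVS bridge wrapper):
#
#   lib = ArpLib(app)
#   lib.set_bridge(bridge)
#   lib.add_arp_table_entry(network=1, ip='10.0.0.5', mac='fa:16:3e:00:00:01')
#   # ARP requests for 10.0.0.5 on network 1 that arrive via packet-in are
#   # then answered directly from the table by packet_in_handler(ev).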
|
NokeCodes/noke-platform
|
refs/heads/master
|
projects/admin.py
|
3
|
from django.contrib import admin
from .models import Project
admin.site.register(Project)
|
DMOJ/judge
|
refs/heads/master
|
dmoj/commands/__init__.py
|
1
|
from typing import List, Type
from dmoj.commands.base_command import Command, commands, register_command
from dmoj.commands.diff import DifferenceCommand
from dmoj.commands.help import HelpCommand
from dmoj.commands.problems import ListProblemsCommand
from dmoj.commands.quit import QuitCommand
from dmoj.commands.rejudge import RejudgeCommand
from dmoj.commands.resubmit import ResubmitCommand
from dmoj.commands.show import ShowCommand
from dmoj.commands.submissions import ListSubmissionsCommand
from dmoj.commands.submit import SubmitCommand
all_commands: List[Type[Command]] = [
ListProblemsCommand,
ListSubmissionsCommand,
SubmitCommand,
ResubmitCommand,
RejudgeCommand,
DifferenceCommand,
ShowCommand,
HelpCommand,
QuitCommand,
]
|
tjsavage/rototutor_djangononrel
|
refs/heads/master
|
django/utils/http.py
|
6
|
import re
import urllib
import base64
from binascii import Error as BinasciiError
from email.Utils import formatdate
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_unicode(urllib.quote(smart_str(url), smart_str(safe)))
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_unicode(urllib.quote_plus(smart_str(url), smart_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if hasattr(query, 'items'):
query = query.items()
return urllib.urlencode(
[(smart_str(k),
isinstance(v, (list,tuple)) and [smart_str(i) for i in v] or smart_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Converts a base 36 string to an integer
"""
return int(s, 36)
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i / j])
i = i % j
factor -= 1
return ''.join(base36)
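# Round-trip sanity check for the base 36 helpers (illustrative, not part of
# the original module):
#
#   >>> int_to_base36(12345)
#   '9ix'
#   >>> base36_to_int('9ix')
#   12345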
def urlsafe_base64_encode(s):
return base64.urlsafe_b64encode(s).rstrip('\n=')
def urlsafe_base64_decode(s):
assert isinstance(s, str)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, '='))
except (LookupError, BinasciiError), e:
raise ValueError(e)
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
        # etag_str has a wrong format; treat it as an opaque string
return [etag_str]
etags = [e.decode('string_escape') for e in etags]
return etags
def quote_etag(etag):
"""
    Wraps a string in double quotes, escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
|
oeeagle/quantum
|
refs/heads/master
|
neutron/tests/unit/bigswitch/test_agent_scheduler.py
|
13
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.tests.unit.bigswitch import test_base
from neutron.tests.unit.openvswitch import test_agent_scheduler
class BigSwitchDhcpAgentNotifierTestCase(
test_agent_scheduler.OvsDhcpAgentNotifierTestCase,
test_base.BigSwitchTestBase):
plugin_str = ('%s.NeutronRestProxyV2' %
test_base.RESTPROXY_PKG_PATH)
def setUp(self):
self.setup_config_files()
self.setup_patches()
super(BigSwitchDhcpAgentNotifierTestCase, self).setUp()
|
rnder/data-science-from-scratch
|
refs/heads/master
|
code-python3/network_analysis.py
|
12
|
import math, random, re
from collections import defaultdict, Counter, deque
from linear_algebra import dot, get_row, get_column, make_matrix, magnitude, scalar_multiply, shape, distance
from functools import partial
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" }
]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# give each user a friends list
for user in users:
user["friends"] = []
# and populate it
for i, j in friendships:
# this works because users[i] is the user whose id is i
users[i]["friends"].append(users[j]) # add i as a friend of j
users[j]["friends"].append(users[i]) # add j as a friend of i
#
# Betweenness Centrality
#
def shortest_paths_from(from_user):
# a dictionary from "user_id" to *all* shortest paths to that user
shortest_paths_to = { from_user["id"] : [[]] }
# a queue of (previous user, next user) that we need to check.
# starts out with all pairs (from_user, friend_of_from_user)
frontier = deque((from_user, friend)
for friend in from_user["friends"])
# keep going until we empty the queue
while frontier:
prev_user, user = frontier.popleft() # take from the beginning
user_id = user["id"]
# the fact that we're pulling from our queue means that
# necessarily we already know a shortest path to prev_user
paths_to_prev = shortest_paths_to[prev_user["id"]]
paths_via_prev = [path + [user_id] for path in paths_to_prev]
# it's possible we already know a shortest path to here as well
old_paths_to_here = shortest_paths_to.get(user_id, [])
# what's the shortest path to here that we've seen so far?
if old_paths_to_here:
min_path_length = len(old_paths_to_here[0])
else:
min_path_length = float('inf')
# any new paths to here that aren't too long
new_paths_to_here = [path_via_prev
for path_via_prev in paths_via_prev
if len(path_via_prev) <= min_path_length
and path_via_prev not in old_paths_to_here]
shortest_paths_to[user_id] = old_paths_to_here + new_paths_to_here
# add new neighbors to the frontier
frontier.extend((user, friend)
for friend in user["friends"]
if friend["id"] not in shortest_paths_to)
return shortest_paths_to
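# Worked example (illustrative): starting from users[0] (Hero), user 3 (Chi)
# is reachable by exactly two shortest paths, via Dunn or via Sue:
#
#   shortest_paths_from(users[0])[3]  ==  [[1, 3], [2, 3]]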
for user in users:
user["shortest_paths"] = shortest_paths_from(user)
for user in users:
user["betweenness_centrality"] = 0.0
for source in users:
source_id = source["id"]
for target_id, paths in source["shortest_paths"].items():
if source_id < target_id: # don't double count
num_paths = len(paths) # how many shortest paths?
contrib = 1 / num_paths # contribution to centrality
for path in paths:
for id in path:
if id not in [source_id, target_id]:
users[id]["betweenness_centrality"] += contrib
#
# closeness centrality
#
def farness(user):
"""the sum of the lengths of the shortest paths to each other user"""
return sum(len(paths[0])
for paths in user["shortest_paths"].values())
for user in users:
user["closeness_centrality"] = 1 / farness(user)
#
# matrix multiplication
#
def matrix_product_entry(A, B, i, j):
return dot(get_row(A, i), get_column(B, j))
def matrix_multiply(A, B):
n1, k1 = shape(A)
n2, k2 = shape(B)
if k1 != n2:
raise ArithmeticError("incompatible shapes!")
return make_matrix(n1, k2, partial(matrix_product_entry, A, B))
def vector_as_matrix(v):
"""returns the vector v (represented as a list) as a n x 1 matrix"""
return [[v_i] for v_i in v]
def vector_from_matrix(v_as_matrix):
"""returns the n x 1 matrix as a list of values"""
return [row[0] for row in v_as_matrix]
def matrix_operate(A, v):
v_as_matrix = vector_as_matrix(v)
product = matrix_multiply(A, v_as_matrix)
return vector_from_matrix(product)
def find_eigenvector(A, tolerance=0.00001):
guess = [1 for __ in A]
while True:
result = matrix_operate(A, guess)
length = magnitude(result)
next_guess = scalar_multiply(1/length, result)
if distance(guess, next_guess) < tolerance:
return next_guess, length # eigenvector, eigenvalue
guess = next_guess
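# find_eigenvector is plain power iteration: repeatedly apply the matrix and
# rescale to unit length until successive guesses agree to within tolerance;
# for a matrix with a dominant eigenvalue this converges to the corresponding
# eigenvector (used below on the symmetric adjacency matrix).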
#
# eigenvector centrality
#
def entry_fn(i, j):
return 1 if (i, j) in friendships or (j, i) in friendships else 0
n = len(users)
adjacency_matrix = make_matrix(n, n, entry_fn)
eigenvector_centralities, _ = find_eigenvector(adjacency_matrix)
#
# directed graphs
#
endorsements = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1), (1, 3),
(2, 3), (3, 4), (5, 4), (5, 6), (7, 5), (6, 8), (8, 7), (8, 9)]
for user in users:
user["endorses"] = [] # add one list to track outgoing endorsements
user["endorsed_by"] = [] # and another to track endorsements
for source_id, target_id in endorsements:
users[source_id]["endorses"].append(users[target_id])
users[target_id]["endorsed_by"].append(users[source_id])
endorsements_by_id = [(user["id"], len(user["endorsed_by"]))
for user in users]
sorted(endorsements_by_id,
key=lambda pair: pair[1],
reverse=True)
def page_rank(users, damping = 0.85, num_iters = 100):
# initially distribute PageRank evenly
num_users = len(users)
pr = { user["id"] : 1 / num_users for user in users }
# this is the small fraction of PageRank
# that each node gets each iteration
base_pr = (1 - damping) / num_users
for __ in range(num_iters):
next_pr = { user["id"] : base_pr for user in users }
for user in users:
# distribute PageRank to outgoing links
links_pr = pr[user["id"]] * damping
for endorsee in user["endorses"]:
next_pr[endorsee["id"]] += links_pr / len(user["endorses"])
pr = next_pr
return pr
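# Sketch of the expected outcome (per the book's discussion): Thor (id 4)
# ends up with the highest PageRank, being endorsed from both halves of the
# network:
#
#   pr = page_rank(users)
#   max(pr, key=pr.get)  # == 4 (illustrative)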
if __name__ == "__main__":
print("Betweenness Centrality")
for user in users:
print(user["id"], user["betweenness_centrality"])
print()
print("Closeness Centrality")
for user in users:
print(user["id"], user["closeness_centrality"])
print()
print("Eigenvector Centrality")
for user_id, centrality in enumerate(eigenvector_centralities):
print(user_id, centrality)
print()
print("PageRank")
for user_id, pr in page_rank(users).items():
print(user_id, pr)
|
aguegu/flask-oauthprovider
|
refs/heads/master
|
examples/init_db.py
|
3
|
from demoprovider.models import init_db
init_db()
|
qiuzhong/crosswalk-test-suite
|
refs/heads/master
|
webapi/tct-cors-w3c-tests/inst.wgt.py
|
4
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user)
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex + 1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
    # return True on success, False on failure, so that the callers'
    # "if not doRemoteCopy(...)" checks flag the failing copies
    # (the original returned True on a non-zero exit code, inverting them)
    if return_code != 0:
        return False
    else:
        return True
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
if not doRemoteCopy(
os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
for item in glob.glob("%s/*" % SCRIPT_DIR):
if item.endswith(".wgt"):
continue
elif item.endswith("inst.py"):
continue
else:
item_name = os.path.basename(item)
if not doRemoteCopy(item, "%s/%s" % (PKG_SRC_DIR, item_name)):
# if not doRemoteCopy(item, PKG_SRC_DIR):
action_status = False
for item in glob.glob("%s/cors/support/cgi/*" % SCRIPT_DIR):
item_name = os.path.basename(item)
if not doRemoteCopy(item, "%s/cors/support/%s" %
(PKG_SRC_DIR, item_name)):
# if not doRemoteCopy(item, PKG_SRC_DIR):
action_status = False
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(
userid)
else:
print "[Error] cmd commands error : %s" % str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
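# Illustrative invocations (device ids are placeholders):
#
#   ./inst.wgt.py -i -m SDB -s emulator-26100 -a app   # install over sdb
#   ./inst.wgt.py -u -m SSH -s 192.168.1.10            # uninstall over ssh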
|
cosmoharrigan/pyrolog
|
refs/heads/master
|
prolog/interpreter/heap.py
|
1
|
from rpython.rlib import debug
from prolog.interpreter.term import BindingVar, AttVar
from rpython.rlib import jit
INIT_TRAIL_VAR = []
INIT_TRAIL_BINDING = []
UNROLL_SIZE = 6
class Heap(object):
def __init__(self, prev=None):
self.trail_var = INIT_TRAIL_VAR
debug.make_sure_not_resized(self.trail_var)
self.trail_binding = INIT_TRAIL_BINDING
debug.make_sure_not_resized(self.trail_binding)
self.i = 0
self.trail_attrs = None
self.prev = prev
self.discarded = False
self.hook = None
# _____________________________________________________
# interface that term.py uses
def _find_not_discarded(self):
while self is not None and self.discarded:
self = self.prev
return self
def add_trail_atts(self, attvar, attr_name):
if self._is_created_in_self(attvar):
return
value, index = attvar.get_attribute(attr_name)
self._add_entry_trail_attrs(attvar, index, value)
def trail_new_attr(self, attvar, index, value):
if self._is_created_in_self(attvar):
return
self._add_entry_trail_attrs(attvar, index, value)
def _add_entry_trail_attrs(self, attvar, index, value):
entry = (attvar, index, value)
if self.trail_attrs is None:
self.trail_attrs = [entry]
else:
self.trail_attrs.append(entry)
def add_trail(self, var):
""" Remember the current state of a variable to be able to backtrack it
to that state. Usually called just before a variable changes. """
# if the variable doesn't exist before the last choice point, don't
# trail it (variable shunting)
if self._is_created_in_self(var):
return
i = self.i
if i >= len(self.trail_var):
assert i == len(self.trail_var)
self._double_size()
self.trail_var[i] = var
self.trail_binding[i] = var.binding
self.i = i + 1
def _is_created_in_self(self, var):
created_in = var.created_after_choice_point
if self is created_in: # fast path
return True
if created_in is not None and created_in.discarded:
# unroll _find_not_discarded once for better jittability
created_in = created_in.prev
if created_in is not None and created_in.discarded:
created_in = created_in._find_not_discarded()
var.created_after_choice_point = created_in
return self is created_in
def _double_size(self):
l = len(self.trail_var)
if l == 0:
self.trail_var = [None, None]
self.trail_binding = [None, None]
elif l == 1:
assert 0, "cannot happen"
else:
self.trail_var = self.trail_var + [None] * l
self.trail_binding = self.trail_binding + [None] * l
def newvar(self):
""" Make a new variable. Should return a Var instance, possibly with
interesting attributes set that e.g. add_trail can inspect."""
result = BindingVar()
result.created_after_choice_point = self
return result
def new_attvar(self):
result = AttVar()
result.created_after_choice_point = self
return result
def newvar_in_term(self, parent, index):
from prolog.interpreter.term import var_in_term_classes
return self.newvar() # disabled for now
result = var_in_term_classes[index](parent)
result.created_after_choice_point = self
return result
# _____________________________________________________
def branch(self):
""" Branch of a heap for a choice point. The return value is the new
heap that should be used from now on, self is the heap that can be
backtracked to."""
res = Heap(self)
return res
@jit.unroll_safe
def revert_upto(self, heap, discard_choicepoint=False):
""" Revert to the heap corresponding to a choice point. The return
value is the new heap that should be used."""
previous = self
while self is not heap:
if self is None:
break
self._revert()
previous = self
self = self.prev
if discard_choicepoint:
return heap
return previous
@jit.look_inside_iff(lambda self: self.i < UNROLL_SIZE)
def _revert(self):
i = jit.promote(self.i) - 1
while i >= 0:
v = self.trail_var[i]
assert v is not None
v.binding = self.trail_binding[i]
self.trail_var[i] = None
self.trail_binding[i] = None
i -= 1
self.i = 0
if self.trail_attrs is not None:
for i in range(len(self.trail_attrs) - 1, -1, -1):
attvar, index, value = self.trail_attrs[i]
attvar.reset_field(index, value)
self.trail_attrs = None
self.hook = None
def discard(self, current_heap):
""" Remove a heap that is no longer needed (usually due to a cut) from
a chain of frames. """
self.discarded = True
if current_heap.prev is self:
current_heap._discard_try_remove_current_trail(self)
if current_heap.trail_attrs is not None:
current_heap._discard_try_remove_current_trail_attvars(self)
# move the variable bindings from the discarded heap to the current
# heap
self._discard_move_bindings_to_current(current_heap)
if self.trail_attrs is not None:
if current_heap.trail_attrs is not None:
current_heap.trail_attrs.extend(self.trail_attrs)
else:
current_heap.trail_attrs = self.trail_attrs
current_heap.prev = self.prev
self.trail_var = None
self.trail_binding = None
self.trail_attrs = None
self.i = -1
self.prev = current_heap
else:
return self
return current_heap
@jit.look_inside_iff(lambda self, discarded_heap:
self.i < UNROLL_SIZE)
def _discard_try_remove_current_trail(self, discarded_heap):
targetpos = 0
# check whether variables in the current heap no longer need to be
# traced, because they originate in the discarded heap
for i in range(jit.promote(self.i)):
var = self.trail_var[i]
binding = self.trail_binding[i]
if var.created_after_choice_point is discarded_heap:
var.created_after_choice_point = discarded_heap.prev
self.trail_var[i] = None
self.trail_binding[i] = None
else:
self.trail_var[targetpos] = var
self.trail_binding[targetpos] = binding
targetpos += 1
self.i = targetpos
def _discard_try_remove_current_trail_attvars(self, discarded_heap):
trail_attrs = []
        for var, attr, value in self.trail_attrs:
            if var.created_after_choice_point is discarded_heap:
                var.created_after_choice_point = discarded_heap.prev
            else:
                # keep entries that survive the discarded choice point
                # (the original assigned to trail_attrs[targetpos] on an
                # empty list and never advanced targetpos, so any surviving
                # entry raised IndexError)
                trail_attrs.append((var, attr, value))
if not trail_attrs:
trail_attrs = None
self.trail_attrs = trail_attrs
@jit.look_inside_iff(lambda self, current_heap:
self.i < UNROLL_SIZE)
def _discard_move_bindings_to_current(self, current_heap):
for i in range(jit.promote(self.i)):
var = self.trail_var[i]
currbinding = var.binding
binding = self.trail_binding[i]
var.binding = binding
current_heap.add_trail(var)
var.binding = currbinding
def __repr__(self):
return "<Heap %r trailed vars>" % (self.i, )
def _dot(self, seen):
if self in seen:
return
seen.add(self)
yield '%s [label="%r", shape=octagon]' % (id(self), self)
if self.prev:
yield "%s -> %s [label=prev]" % (id(self), id(self.prev))
for line in self.prev._dot(seen):
yield line
    # methods for the hook chain
def add_hook(self, attvar):
self.hook = HookCell(attvar, self.hook)
class HookCell(object):
def __init__(self, attvar, next=None):
self.attvar = attvar
self.next = next
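# Minimal trail/backtrack sketch (illustrative; `some_term` is a hypothetical
# term object):
#
#   h0 = Heap()
#   v = h0.newvar()          # created in h0
#   h1 = h0.branch()         # choice point
#   h1.add_trail(v)          # remember v's binding before changing it
#   v.binding = some_term
#   h1.revert_upto(h0)       # restores v.binding to its pre-choice value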
|
debugger22/sympy
|
refs/heads/master
|
sympy/liealgebras/root_system.py
|
76
|
# -*- coding: utf-8 -*-
from .cartan_type import CartanType
from sympy.core import Basic
from sympy.core.compatibility import range
class RootSystem(Basic):
"""Represent the root system of a simple Lie algebra
Every simple Lie algebra has a unique root system. To find the root
system, we first consider the Cartan subalgebra of g, which is the maximal
abelian subalgebra, and consider the adjoint action of g on this
subalgebra. There is a root system associated with this action. Now, a
    root system over a vector space V is a finite set of vectors Φ (called
roots), which satisfy:
1. The roots span V
2. The only scalar multiples of x in Φ are x and -x
3. For every x in Φ, the set Φ is closed under reflection
through the hyperplane perpendicular to x.
4. If x and y are roots in Φ, then the projection of y onto
the line through x is a half-integral multiple of x.
Now, there is a subset of Φ, which we will call Δ, such that:
1. Δ is a basis of V
2. Each root x in Φ can be written x = Σ k_y y for y in Δ
The elements of Δ are called the simple roots.
Therefore, we see that the simple roots span the root space of a given
simple Lie algebra.
References: https://en.wikipedia.org/wiki/Root_system
Lie Algebras and Representation Theory - Humphreys
"""
def __new__(cls, cartantype):
"""Create a new RootSystem object
This method assigns an attribute called cartan_type to each instance of
a RootSystem object. When an instance of RootSystem is called, it
        needs an argument identifying a simple Lie algebra, e.g. the string
        "A3".
We then take the CartanType of this argument and set it as the
cartan_type attribute of the RootSystem instance.
"""
obj = Basic.__new__(cls, cartantype)
obj.cartan_type = CartanType(cartantype)
return obj
def simple_roots(self):
"""Generate the simple roots of the Lie algebra
The rank of the Lie algebra determines the number of simple roots that
it has. This method obtains the rank of the Lie algebra, and then uses
the simple_root method from the Lie algebra classes to generate all the
simple roots.
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> roots = c.simple_roots()
>>> roots
{1: [1, -1, 0, 0], 2: [0, 1, -1, 0], 3: [0, 0, 1, -1]}
"""
n = self.cartan_type.rank()
roots = {}
for i in range(1, n+1):
root = self.cartan_type.simple_root(i)
roots[i] = root
return roots
def all_roots(self):
"""Generate all the roots of a given root system
The result is a dictionary where the keys are integer numbers. It
generates the roots by getting the dictionary of all positive roots
        from the base classes, and then taking each root and multiplying it
by -1 and adding it to the dictionary. In this way all the negative
roots are generated.
"""
alpha = self.cartan_type.positive_roots()
keys = list(alpha.keys())
k = max(keys)
for val in keys:
k += 1
root = alpha[val]
newroot = [-x for x in root]
alpha[k] = newroot
return alpha
def root_space(self):
"""Return the span of the simple roots
The root space is the vector space spanned by the simple roots, i.e. it
is a vector space with a distinguished basis, the simple roots. This
method returns a string that represents the root space as the span of
the simple roots, alpha[1],...., alpha[n].
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.root_space()
'alpha[1] + alpha[2] + alpha[3]'
"""
n = self.cartan_type.rank()
rs = " + ".join("alpha["+str(i) +"]" for i in range(1, n+1))
return rs
def add_simple_roots(self, root1, root2):
"""Add two simple roots together
The function takes as input two integers, root1 and root2. It then
uses these integers as keys in the dictionary of simple roots, and gets
the corresponding simple roots, and then adds them together.
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> newroot = c.add_simple_roots(1, 2)
>>> newroot
[1, 0, -1, 0]
"""
alpha = self.simple_roots()
if root1 > len(alpha) or root2 > len(alpha):
raise ValueError("You've used a root that doesn't exist!")
a1 = alpha[root1]
a2 = alpha[root2]
newroot = []
length = len(a1)
for i in range(length):
newroot.append(a1[i] + a2[i])
return newroot
def add_as_roots(self, root1, root2):
"""Add two roots together if and only if their sum is also a root
It takes as input two vectors which should be roots. It then computes
their sum and checks if it is in the list of all possible roots. If it
is, it returns the sum. Otherwise it returns a string saying that the
sum is not a root.
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.add_as_roots([1, 0, -1, 0], [0, 0, 1, -1])
[1, 0, 0, -1]
>>> c.add_as_roots([1, -1, 0, 0], [0, 0, -1, 1])
'The sum of these two roots is not a root'
"""
alpha = self.all_roots()
newroot = []
for entry in range(len(root1)):
newroot.append(root1[entry] + root2[entry])
if newroot in alpha.values():
return newroot
else:
return "The sum of these two roots is not a root"
def cartan_matrix(self):
"""Cartan matrix of Lie algebra associated with this root system
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0],
[-1, 2, -1],
[ 0, -1, 2]])
"""
return self.cartan_type.cartan_matrix()
def dynkin_diagram(self):
"""Dynkin diagram of the Lie algebra associated with this root system
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> print(c.dynkin_diagram())
0---0---0
1 2 3
"""
return self.cartan_type.dynkin_diagram()
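# Quick check sketch (illustrative): A3 has six positive roots, so all_roots
# should return 12 entries, each negative root mirroring a positive one:
#
#   >>> len(RootSystem("A3").all_roots())
#   12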
|
piotroxp/scibibscan
|
refs/heads/master
|
scib/lib/python3.5/site-packages/numpy/lib/twodim_base.py
|
83
|
""" Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
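# e.g. _min_int(0, 100) -> int8, _min_int(0, 40000) -> int32 (illustrative).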
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
|
trungnt13/scikit-learn
|
refs/heads/master
|
examples/applications/svm_gui.py
|
287
|
"""
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point-and-click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
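        # Evaluate the classifier's decision function on a regular grid over
        # the plotting area; the reshaped values drive the contour plots.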
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
|
kutuhal/oracle-r12-accounting
|
refs/heads/master
|
lib/django/conf/locale/cy/__init__.py
|
12133432
| |
temberature/python-data-mining-platform
|
refs/heads/master
|
pymining/classifier/__init__.py
|
12133432
| |
joshjo/django-sentry
|
refs/heads/master
|
sentry/migrations/0003_auto__add_field_message_group__del_field_groupedmessage_server_name.py
|
14
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Message.group'
db.add_column('sentry_message', 'group', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='message_set', null=True, to=orm['sentry.GroupedMessage']), keep_default=False)
# Deleting field 'GroupedMessage.server_name'
db.delete_column('sentry_groupedmessage', 'server_name')
def backwards(self, orm):
# Deleting field 'Message.group'
db.delete_column('sentry_message', 'group_id')
# Adding field 'GroupedMessage.server_name'
db.add_column('sentry_groupedmessage', 'server_name', self.gf('django.db.models.fields.CharField')(default='', max_length=128, db_index=True), keep_default=False)
models = {
'sentry.groupedmessage': {
'Meta': {'unique_together': "(('logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'sentry.message': {
'Meta': {'object_name': 'Message'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'message_set'", 'null': 'True', 'to': "orm['sentry.GroupedMessage']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['sentry']
|
jendap/tensorflow
|
refs/heads/master
|
tensorflow/compiler/tests/clustering_test.py
|
13
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the behavior of the auto-compilation pass."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
class ClusteringTest(xla_test.XLATestCase):
def testAdd(self):
val1 = np.array([4, 3, 2, 1], dtype=np.float32)
val2 = np.array([5, 6, 7, 8], dtype=np.float32)
expected = val1 + val2
with self.cached_session():
with self.test_scope():
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
output = math_ops.add(input1, input2)
result = self.evaluate(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testAddFromCpuMultiple(self):
val1 = np.array([4, 3, 2, 1]).astype(np.float32)
val2 = np.array([5, 6, 7, 8]).astype(np.float32)
expected = val1 + val2
with self.cached_session():
with ops.device(CPU_DEVICE):
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
with self.test_scope():
output = math_ops.add(input1, input2)
for _ in xrange(10):
result = self.evaluate(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testDeadlock(self):
# Builds a graph of the form:
# x -> y
# | \
# z -> w
# where x and z are placed on the CPU and y and w are placed on the XLA
# device. If y and w are clustered for compilation, then the graph will
# deadlock since the clustered graph will contain a self-loop.
with self.cached_session() as sess:
with ops.device(CPU_DEVICE):
x = array_ops.placeholder(dtypes.float32, [2])
with self.test_scope():
y = x * 2
with ops.device(CPU_DEVICE):
z = y * y
with self.test_scope():
w = y + z
result = sess.run(w, {x: [1.5, 0.5]})
self.assertAllClose(result, [12., 2.], rtol=1e-3)
def testHostMemory(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.int32)
with self.test_scope():
y = x + 1
with ops.device(CPU_DEVICE):
# Place a computation on the CPU, so y and w cannot be merged into the
# same JIT compilation.
z = y * 2
with self.test_scope():
# Argument 'y' is a non-constant output of a previous cluster. Make sure
# it is properly copied to host memory so it can be used as a
# compile-time constant input for this cluster.
w = array_ops.reshape(z, y)
result = sess.run(w, {x: [1, 0]})
expected = np.array([[4], [2]], dtype=np.int32)
self.assertAllClose(expected, result, rtol=1e-3)
if __name__ == "__main__":
googletest.main()
|
minhtuancn/odoo
|
refs/heads/8.0
|
addons/mrp/wizard/mrp_price.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_price(osv.osv_memory):
_name = 'mrp.product_price'
_description = 'Product Price'
_columns = {
        'number': fields.integer('Quantity', required=True, help="Specify the quantity of products to produce or buy. The cost structure report will be displayed based on this quantity."),
}
_defaults = {
'number': 1,
}
def print_report(self, cr, uid, ids, context=None):
""" To print the report of Product cost structure
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return : Report
"""
if context is None:
context = {}
datas = {'ids' : context.get('active_ids',[])}
res = self.read(cr, uid, ids, ['number'])
res = res and res[0] or {}
datas['form'] = res
return {
'type' : 'ir.actions.report.xml',
'report_name':'product.price',
'datas' : datas,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hknyldz/pisitools
|
refs/heads/master
|
pisilinux/pisilinux/operations/upgrade.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 - 2007, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import sys
import gettext
__trans = gettext.translation('pisilinux', fallback=True)
_ = __trans.ugettext
import pisilinux
import pisilinux.ui as ui
import pisilinux.context as ctx
import pisilinux.pgraph as pgraph
import pisilinux.atomicoperations as atomicoperations
import pisilinux.operations as operations
import pisilinux.util as util
import pisilinux.db
import pisilinux.blacklist
def check_update_actions(packages):
installdb = pisilinux.db.installdb.InstallDB()
packagedb = pisilinux.db.packagedb.PackageDB()
actions = {}
for package in packages:
if not installdb.has_package(package):
continue
pkg = packagedb.get_package(package)
version, release, build = installdb.get_version(package)
pkg_actions = pkg.get_update_actions(release)
for action_name, action_targets in list(pkg_actions.items()):
item = actions.setdefault(action_name, [])
for action_target in action_targets:
item.append((package, action_target))
has_actions = False
if "serviceRestart" in actions:
has_actions = True
ctx.ui.warning(_("You must restart the following service(s) manually "
"for the updated software to take effect:"))
for package, target in actions["serviceRestart"]:
ctx.ui.info(" - %s" % target)
if "systemRestart" in actions:
has_actions = True
ctx.ui.warning(_("You must restart your system for the updates "
"in the following package(s) to take effect:"))
for package, target in actions["systemRestart"]:
ctx.ui.info(" - %s" % package)
return has_actions
def find_upgrades(packages, replaces):
packagedb = pisilinux.db.packagedb.PackageDB()
installdb = pisilinux.db.installdb.InstallDB()
debug = ctx.config.get_option("debug")
security_only = ctx.get_option('security_only')
comparesha1sum = ctx.get_option('compare_sha1sum')
Ap = []
ds = []
for i_pkg in packages:
if i_pkg in list(replaces.keys()):
            # Replaced packages will be forced to upgrade, because replaced
            # packages are also marked as obsoleted. So we skip them here.
continue
if i_pkg.endswith(ctx.const.package_suffix):
ctx.ui.debug(_("Warning: package *name* ends with '.pisilinux'"))
if not installdb.has_package(i_pkg):
ctx.ui.info(_('Package %s is not installed.') % i_pkg, True)
continue
if not packagedb.has_package(i_pkg):
ctx.ui.info(_('Package %s is not available in repositories.') % i_pkg, True)
continue
pkg = packagedb.get_package(i_pkg)
hash = installdb.get_install_tar_hash(i_pkg)
(version, release, build, distro, distro_release) = installdb.get_version_and_distro_release(i_pkg)
if security_only and not pkg.has_update_type("security", release):
continue
if pkg.distribution == distro and \
pisilinux.version.make_version(pkg.distributionRelease) > pisilinux.version.make_version(distro_release):
Ap.append(i_pkg)
else:
if int(release) < int(pkg.release):
Ap.append(i_pkg)
elif comparesha1sum and \
int(release) == int(pkg.release) and \
not pkg.installTarHash == hash:
Ap.append(i_pkg)
ds.append(i_pkg)
else:
ctx.ui.info(_('Package %s is already at the latest release %s.')
% (pkg.name, pkg.release), True)
if debug and ds:
ctx.ui.status(_('The following packages have different sha1sum:'))
ctx.ui.info(util.format_by_columns(sorted(ds)))
return Ap
def upgrade(A=[], repo=None):
"""Re-installs packages from the repository, trying to perform
a minimum or maximum number of upgrades according to options."""
packagedb = pisilinux.db.packagedb.PackageDB()
installdb = pisilinux.db.installdb.InstallDB()
replaces = packagedb.get_replaces()
if not A:
# if A is empty, then upgrade all packages
A = installdb.list_installed()
if repo:
repo_packages = set(packagedb.list_packages(repo))
A = set(A).intersection(repo_packages)
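    # Remember the originally requested set so extra packages pulled in by
    # dependencies can be detected and reported before asking to continue.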
A_0 = A = set(A)
Ap = find_upgrades(A, replaces)
A = set(Ap)
# Force upgrading of installed but replaced packages or else they will be removed (they are obsoleted also).
    # This is not wanted for a replaced driver package (e.g. nvidia-X).
A |= set(pisilinux.util.flatten_list(list(replaces.values())))
A |= upgrade_base(A)
A = pisilinux.blacklist.exclude_from(A, ctx.const.blacklist)
if ctx.get_option('exclude_from'):
A = pisilinux.blacklist.exclude_from(A, ctx.get_option('exclude_from'))
if ctx.get_option('exclude'):
A = pisilinux.blacklist.exclude(A, ctx.get_option('exclude'))
ctx.ui.debug('A = %s' % str(A))
if len(A)==0:
ctx.ui.info(_('No packages to upgrade.'))
return True
ctx.ui.debug('A = %s' % str(A))
if not ctx.config.get_option('ignore_dependency'):
G_f, order = plan_upgrade(A, replaces=replaces)
else:
G_f = None
order = list(A)
componentdb = pisilinux.db.componentdb.ComponentDB()
# Bug 4211
if componentdb.has_component('system.base'):
order = operations.helper.reorder_base_packages(order)
ctx.ui.status(_('The following packages will be upgraded:'))
ctx.ui.info(util.format_by_columns(sorted(order)))
total_size, cached_size = operations.helper.calculate_download_sizes(order)
total_size, symbol = util.human_readable_size(total_size)
ctx.ui.info(util.colorize(_('Total size of package(s): %.2f %s') % (total_size, symbol), "yellow"))
needs_confirm = check_update_actions(order)
# NOTE: replaces.values() was already flattened above, it can be reused
if set(order) - A_0 - set(pisilinux.util.flatten_list(list(replaces.values()))):
ctx.ui.warning(_("There are extra packages due to dependencies."))
needs_confirm = True
if ctx.get_option('dry_run'):
return
if needs_confirm and \
not ctx.ui.confirm(_("Do you want to continue?")):
return False
ctx.ui.notify(ui.packagestogo, order = order)
conflicts = []
if not ctx.get_option('ignore_package_conflicts'):
conflicts = operations.helper.check_conflicts(order, packagedb)
paths = []
for x in order:
ctx.ui.info(util.colorize(_("Downloading %d / %d") % (order.index(x)+1, len(order)), "yellow"))
install_op = atomicoperations.Install.from_name(x)
paths.append(install_op.package_fname)
    # fetch the packages to be upgraded, but do not install them.
if ctx.get_option('fetch_only'):
return
if conflicts:
operations.remove.remove_conflicting_packages(conflicts)
operations.remove.remove_obsoleted_packages()
for path in paths:
ctx.ui.info(util.colorize(_("Installing %d / %d") % (paths.index(path)+1, len(paths)), "yellow"))
install_op = atomicoperations.Install(path, ignore_file_conflicts = True)
install_op.install(not ctx.get_option('compare_sha1sum'))
def plan_upgrade(A, force_replaced=True, replaces=None):
# FIXME: remove force_replaced
# try to construct a pisilinux graph of packages to
# install / reinstall
packagedb = pisilinux.db.packagedb.PackageDB()
G_f = pgraph.PGraph(packagedb) # construct G_f
A = set(A)
# Force upgrading of installed but replaced packages or else they will be removed (they are obsoleted also).
    # This is not wanted for a replaced driver package (e.g. nvidia-X).
#
# FIXME: this is also not nice. this would not be needed if replaced packages are not written as obsoleted also.
# But if they are not written obsoleted "pisilinux index" indexes them
if force_replaced:
if replaces is None:
replaces = packagedb.get_replaces()
A |= set(pisilinux.util.flatten_list(list(replaces.values())))
# find the "install closure" graph of G_f by package
# set A using packagedb
for x in A:
G_f.add_package(x)
installdb = pisilinux.db.installdb.InstallDB()
def add_runtime_deps(pkg, Bp):
for dep in pkg.runtimeDependencies():
# add packages that can be upgraded
if installdb.has_package(dep.package) and dep.satisfied_by_installed():
continue
if dep.satisfied_by_repo():
if not dep.package in G_f.vertices():
Bp.add(str(dep.package))
# Always add the dependency info although the dependant
# package is already a member of this graph. Upgrade order
# might change if the dependency info differs from the
# previous ones.
G_f.add_dep(pkg.name, dep)
else:
ctx.ui.error(_('Dependency %s of %s cannot be satisfied') % (dep, pkg.name))
raise Exception(_("Upgrade is not possible."))
def add_resolvable_conflicts(pkg, Bp):
"""Try to resolve conflicts by upgrading
If a package B conflicts with an old version of package A and
does not conflict with the new version of A, add A to the upgrade list.
"""
for conflict in pkg.conflicts:
if conflict.package in G_f.vertices():
# Conflicting package is already in the upgrade list.
continue
if not pisilinux.conflict.installed_package_conflicts(conflict):
# Conflicting package is not installed.
# No need to deal with it.
continue
if not packagedb.has_package(conflict.package):
# Conflicting package is not available in repo.
# Installed package will be removed.
continue
new_pkg = packagedb.get_package(conflict.package)
if conflict.satisfies_relation(new_pkg.version, new_pkg.release):
# Package still conflicts with the repo package.
# Installed package will be removed.
continue
# Upgrading the package will resolve conflict.
# Add it to the upgrade list.
Bp.add(conflict.package)
G_f.add_package(conflict.package)
def add_broken_revdeps(pkg, Bp):
# Search reverse dependencies to see if anything
# should be upgraded
rev_deps = installdb.get_rev_deps(pkg.name)
for rev_dep, depinfo in rev_deps:
# add only installed but unsatisfied reverse dependencies
if rev_dep in G_f.vertices() or depinfo.satisfied_by_repo():
continue
if is_upgradable(rev_dep):
Bp.add(rev_dep)
G_f.add_plain_dep(rev_dep, pkg.name)
def add_needed_revdeps(pkg, Bp):
        # Search for reverse-dependency update needs of the packages to be
        # upgraded; check only the installed ones.
version, release, build = installdb.get_version(pkg.name)
actions = pkg.get_update_actions(release)
packages = actions.get("reverseDependencyUpdate")
if packages:
for target_package in packages:
for name, dep in installdb.get_rev_deps(target_package):
if name in G_f.vertices() or not is_upgradable(name):
continue
Bp.add(name)
G_f.add_plain_dep(name, target_package)
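    # Expand the package set breadth-first with runtime dependencies,
    # resolvable conflicts and reverse dependencies until it stabilizes.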
while A:
Bp = set()
for x in A:
pkg = packagedb.get_package(x)
add_runtime_deps(pkg, Bp)
add_resolvable_conflicts(pkg, Bp)
if installdb.has_package(x):
add_broken_revdeps(pkg, Bp)
add_needed_revdeps(pkg, Bp)
A = Bp
if ctx.config.get_option('debug'):
G_f.write_graphviz(sys.stdout)
order = G_f.topological_sort()
order.reverse()
return G_f, order
def upgrade_base(A = set()):
installdb = pisilinux.db.installdb.InstallDB()
componentdb = pisilinux.db.componentdb.ComponentDB()
if not ctx.config.values.general.ignore_safety and not ctx.get_option('ignore_safety'):
if componentdb.has_component('system.base'):
systembase = set(componentdb.get_union_component('system.base').packages)
extra_installs = [x for x in systembase - set(A) if not installdb.has_package(x)]
extra_installs = pisilinux.blacklist.exclude_from(extra_installs, ctx.const.blacklist)
if extra_installs:
ctx.ui.warning(_("Safety switch forces the installation of "
"following packages:"))
ctx.ui.info(util.format_by_columns(sorted(extra_installs)))
G_f, install_order = operations.install.plan_install_pkg_names(extra_installs)
extra_upgrades = [x for x in systembase - set(install_order) if is_upgradable(x)]
upgrade_order = []
extra_upgrades = pisilinux.blacklist.exclude_from(extra_upgrades, ctx.const.blacklist)
if ctx.get_option('exclude_from'):
extra_upgrades = pisilinux.blacklist.exclude_from(extra_upgrades, ctx.get_option('exclude_from'))
if ctx.get_option('exclude'):
extra_upgrades = pisilinux.blacklist.exclude(extra_upgrades, ctx.get_option('exclude'))
if extra_upgrades:
ctx.ui.warning(_("Safety switch forces the upgrade of "
"following packages:"))
ctx.ui.info(util.format_by_columns(sorted(extra_upgrades)))
G_f, upgrade_order = plan_upgrade(extra_upgrades, force_replaced=False)
# return packages that must be added to any installation
return set(install_order + upgrade_order)
else:
ctx.ui.warning(_('Safety switch: The component system.base cannot be found.'))
return set()
def is_upgradable(name):
installdb = pisilinux.db.installdb.InstallDB()
if not installdb.has_package(name):
return False
(i_version, i_release, i_build, i_distro, i_distro_release) = installdb.get_version_and_distro_release(name)
packagedb = pisilinux.db.packagedb.PackageDB()
try:
version, release, build, distro, distro_release = packagedb.get_version_and_distro_release(name, packagedb.which_repo(name))
except KeyboardInterrupt:
raise
except Exception: #FIXME: what exception could we catch here, replace with that.
return False
if distro == i_distro and \
pisilinux.version.make_version(distro_release) > pisilinux.version.make_version(i_distro_release):
return True
return int(i_release) < int(release)
|
sjsucohort6/openstack
|
refs/heads/master
|
python/venv/lib/python2.7/site-packages/openstackclient/tests/compute/v2/test_server.py
|
1
|
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import mock
import testtools
from openstackclient.common import exceptions
from openstackclient.common import utils as common_utils
from openstackclient.compute.v2 import server
from openstackclient.tests.compute.v2 import fakes as compute_fakes
from openstackclient.tests import fakes
from openstackclient.tests.image.v2 import fakes as image_fakes
from openstackclient.tests import utils
class TestServer(compute_fakes.TestComputev2):
def setUp(self):
super(TestServer, self).setUp()
# Get a shortcut to the ServerManager Mock
self.servers_mock = self.app.client_manager.compute.servers
self.servers_mock.reset_mock()
# Get a shortcut to the ImageManager Mock
self.cimages_mock = self.app.client_manager.compute.images
self.cimages_mock.reset_mock()
# Get a shortcut to the FlavorManager Mock
self.flavors_mock = self.app.client_manager.compute.flavors
self.flavors_mock.reset_mock()
# Get a shortcut to the ImageManager Mock
self.images_mock = self.app.client_manager.image.images
self.images_mock.reset_mock()
class TestServerCreate(TestServer):
def setUp(self):
super(TestServerCreate, self).setUp()
self.servers_mock.create.return_value = fakes.FakeResource(
None,
copy.deepcopy(compute_fakes.SERVER),
loaded=True,
)
new_server = fakes.FakeResource(
None,
copy.deepcopy(compute_fakes.SERVER),
loaded=True,
)
new_server.__dict__['networks'] = {}
self.servers_mock.get.return_value = new_server
self.image = fakes.FakeResource(
None,
copy.deepcopy(image_fakes.IMAGE),
loaded=True,
)
self.cimages_mock.get.return_value = self.image
self.flavor = fakes.FakeResource(
None,
copy.deepcopy(compute_fakes.FLAVOR),
loaded=True,
)
self.flavors_mock.get.return_value = self.flavor
# Get the command object to test
self.cmd = server.CreateServer(self.app, None)
def test_server_create_no_options(self):
arglist = [
compute_fakes.server_id,
]
verifylist = [
('server_name', compute_fakes.server_id),
]
try:
# Missing required args should bail here
self.check_parser(self.cmd, arglist, verifylist)
except utils.ParserException:
pass
def test_server_create_minimal(self):
arglist = [
'--image', 'image1',
'--flavor', 'flavor1',
compute_fakes.server_id,
]
verifylist = [
('image', 'image1'),
('flavor', 'flavor1'),
('config_drive', False),
('server_name', compute_fakes.server_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = dict(
meta=None,
files={},
reservation_id=None,
min_count=1,
max_count=1,
security_groups=[],
userdata=None,
key_name=None,
availability_zone=None,
block_device_mapping={},
nics=[],
scheduler_hints={},
config_drive=None,
)
# ServerManager.create(name, image, flavor, **kwargs)
self.servers_mock.create.assert_called_with(
compute_fakes.server_id,
self.image,
self.flavor,
**kwargs
)
collist = ('addresses', 'flavor', 'id', 'name', 'properties')
self.assertEqual(collist, columns)
datalist = (
'',
'Large ()',
compute_fakes.server_id,
compute_fakes.server_name,
'',
)
self.assertEqual(datalist, data)
def test_server_create_with_network(self):
arglist = [
'--image', 'image1',
'--flavor', 'flavor1',
'--nic', 'net-id=net1',
'--nic', 'port-id=port1',
compute_fakes.server_id,
]
verifylist = [
('image', 'image1'),
('flavor', 'flavor1'),
('nic', ['net-id=net1', 'port-id=port1']),
('config_drive', False),
('server_name', compute_fakes.server_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
get_endpoints = mock.Mock()
get_endpoints.return_value = {'network': []}
self.app.client_manager.auth_ref = mock.Mock()
self.app.client_manager.auth_ref.service_catalog = mock.Mock()
self.app.client_manager.auth_ref.service_catalog.get_endpoints = (
get_endpoints)
list_networks = mock.Mock()
list_ports = mock.Mock()
self.app.client_manager.network.list_networks = list_networks
self.app.client_manager.network.list_ports = list_ports
list_networks.return_value = {'networks': [{'id': 'net1_uuid'}]}
list_ports.return_value = {'ports': [{'id': 'port1_uuid'}]}
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = dict(
meta=None,
files={},
reservation_id=None,
min_count=1,
max_count=1,
security_groups=[],
userdata=None,
key_name=None,
availability_zone=None,
block_device_mapping={},
nics=[{'net-id': 'net1_uuid',
'v4-fixed-ip': '',
'v6-fixed-ip': '',
'port-id': ''},
{'net-id': '',
'v4-fixed-ip': '',
'v6-fixed-ip': '',
'port-id': 'port1_uuid'}],
scheduler_hints={},
config_drive=None,
)
# ServerManager.create(name, image, flavor, **kwargs)
self.servers_mock.create.assert_called_with(
compute_fakes.server_id,
self.image,
self.flavor,
**kwargs
)
collist = ('addresses', 'flavor', 'id', 'name', 'properties')
self.assertEqual(collist, columns)
datalist = (
'',
'Large ()',
compute_fakes.server_id,
compute_fakes.server_name,
'',
)
self.assertEqual(datalist, data)
@mock.patch('openstackclient.compute.v2.server.io.open')
def test_server_create_userdata(self, mock_open):
mock_file = mock.MagicMock(name='File')
mock_open.return_value = mock_file
mock_open.read.return_value = '#!/bin/sh'
arglist = [
'--image', 'image1',
'--flavor', 'flavor1',
'--user-data', 'userdata.sh',
compute_fakes.server_id,
]
verifylist = [
('image', 'image1'),
('flavor', 'flavor1'),
('user_data', 'userdata.sh'),
('config_drive', False),
('server_name', compute_fakes.server_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# Ensure the userdata file is opened
mock_open.assert_called_with('userdata.sh')
# Ensure the userdata file is closed
mock_file.close.assert_called_with()
# Set expected values
kwargs = dict(
meta=None,
files={},
reservation_id=None,
min_count=1,
max_count=1,
security_groups=[],
userdata=mock_file,
key_name=None,
availability_zone=None,
block_device_mapping={},
nics=[],
scheduler_hints={},
config_drive=None,
)
# ServerManager.create(name, image, flavor, **kwargs)
self.servers_mock.create.assert_called_with(
compute_fakes.server_id,
self.image,
self.flavor,
**kwargs
)
collist = ('addresses', 'flavor', 'id', 'name', 'properties')
self.assertEqual(collist, columns)
datalist = (
'',
'Large ()',
compute_fakes.server_id,
compute_fakes.server_name,
'',
)
self.assertEqual(datalist, data)
class TestServerDelete(TestServer):
def setUp(self):
super(TestServerDelete, self).setUp()
# This is the return value for utils.find_resource()
self.servers_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(compute_fakes.SERVER),
loaded=True,
)
self.servers_mock.delete.return_value = None
# Get the command object to test
self.cmd = server.DeleteServer(self.app, None)
def test_server_delete_no_options(self):
arglist = [
compute_fakes.server_id,
]
verifylist = [
('servers', [compute_fakes.server_id]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
self.servers_mock.delete.assert_called_with(
compute_fakes.server_id,
)
@mock.patch.object(common_utils, 'wait_for_delete', return_value=True)
def test_server_delete_wait_ok(self, mock_wait_for_delete):
arglist = [
compute_fakes.server_id, '--wait'
]
verifylist = [
('servers', [compute_fakes.server_id]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
self.servers_mock.delete.assert_called_with(
compute_fakes.server_id,
)
mock_wait_for_delete.assert_called_once_with(
self.servers_mock,
compute_fakes.server_id,
callback=server._show_progress
)
@mock.patch.object(common_utils, 'wait_for_delete', return_value=False)
def test_server_delete_wait_fails(self, mock_wait_for_delete):
arglist = [
compute_fakes.server_id, '--wait'
]
verifylist = [
('servers', [compute_fakes.server_id]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.assertRaises(SystemExit, self.cmd.take_action, parsed_args)
self.servers_mock.delete.assert_called_with(
compute_fakes.server_id,
)
mock_wait_for_delete.assert_called_once_with(
self.servers_mock,
compute_fakes.server_id,
callback=server._show_progress
)
class TestServerImageCreate(TestServer):
def setUp(self):
super(TestServerImageCreate, self).setUp()
# This is the return value for utils.find_resource()
self.servers_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(compute_fakes.SERVER),
loaded=True,
)
self.servers_mock.create_image.return_value = image_fakes.image_id
self.images_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(image_fakes.IMAGE),
loaded=True,
)
# Get the command object to test
self.cmd = server.CreateServerImage(self.app, None)
def test_server_image_create_no_options(self):
arglist = [
compute_fakes.server_id,
]
verifylist = [
('server', compute_fakes.server_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# ServerManager.create_image(server, image_name, metadata=)
self.servers_mock.create_image.assert_called_with(
self.servers_mock.get.return_value,
compute_fakes.server_name,
)
collist = ('id', 'name', 'owner', 'protected', 'visibility')
self.assertEqual(collist, columns)
datalist = (
image_fakes.image_id,
image_fakes.image_name,
image_fakes.image_owner,
image_fakes.image_protected,
image_fakes.image_visibility,
)
self.assertEqual(datalist, data)
def test_server_image_create_name(self):
arglist = [
'--name', 'img-nam',
compute_fakes.server_id,
]
verifylist = [
('name', 'img-nam'),
('server', compute_fakes.server_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# ServerManager.create_image(server, image_name, metadata=)
self.servers_mock.create_image.assert_called_with(
self.servers_mock.get.return_value,
'img-nam',
)
collist = ('id', 'name', 'owner', 'protected', 'visibility')
self.assertEqual(collist, columns)
datalist = (
image_fakes.image_id,
image_fakes.image_name,
image_fakes.image_owner,
image_fakes.image_protected,
image_fakes.image_visibility,
)
self.assertEqual(datalist, data)
class TestServerResize(TestServer):
def setUp(self):
super(TestServerResize, self).setUp()
# This is the return value for utils.find_resource()
self.servers_get_return_value = fakes.FakeResource(
None,
copy.deepcopy(compute_fakes.SERVER),
loaded=True,
)
self.servers_mock.get.return_value = self.servers_get_return_value
self.servers_mock.resize.return_value = None
self.servers_mock.confirm_resize.return_value = None
self.servers_mock.revert_resize.return_value = None
# This is the return value for utils.find_resource()
self.flavors_get_return_value = fakes.FakeResource(
None,
copy.deepcopy(compute_fakes.FLAVOR),
loaded=True,
)
self.flavors_mock.get.return_value = self.flavors_get_return_value
# Get the command object to test
self.cmd = server.ResizeServer(self.app, None)
def test_server_resize_no_options(self):
arglist = [
compute_fakes.server_id,
]
verifylist = [
('confirm', False),
('revert', False),
('server', compute_fakes.server_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
self.servers_mock.get.assert_called_with(
compute_fakes.server_id,
)
self.assertNotCalled(self.servers_mock.resize)
self.assertNotCalled(self.servers_mock.confirm_resize)
self.assertNotCalled(self.servers_mock.revert_resize)
def test_server_resize(self):
arglist = [
'--flavor', compute_fakes.flavor_id,
compute_fakes.server_id,
]
verifylist = [
('flavor', compute_fakes.flavor_id),
('confirm', False),
('revert', False),
('server', compute_fakes.server_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
self.servers_mock.get.assert_called_with(
compute_fakes.server_id,
)
self.flavors_mock.get.assert_called_with(
compute_fakes.flavor_id,
)
self.servers_mock.resize.assert_called_with(
self.servers_get_return_value,
self.flavors_get_return_value,
)
self.assertNotCalled(self.servers_mock.confirm_resize)
self.assertNotCalled(self.servers_mock.revert_resize)
def test_server_resize_confirm(self):
arglist = [
'--confirm',
compute_fakes.server_id,
]
verifylist = [
('confirm', True),
('revert', False),
('server', compute_fakes.server_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
self.servers_mock.get.assert_called_with(
compute_fakes.server_id,
)
self.assertNotCalled(self.servers_mock.resize)
self.servers_mock.confirm_resize.assert_called_with(
self.servers_get_return_value,
)
self.assertNotCalled(self.servers_mock.revert_resize)
def test_server_resize_revert(self):
arglist = [
'--revert',
compute_fakes.server_id,
]
verifylist = [
('confirm', False),
('revert', True),
('server', compute_fakes.server_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
self.servers_mock.get.assert_called_with(
compute_fakes.server_id,
)
self.assertNotCalled(self.servers_mock.resize)
self.assertNotCalled(self.servers_mock.confirm_resize)
self.servers_mock.revert_resize.assert_called_with(
self.servers_get_return_value,
)
class TestServerGeneral(testtools.TestCase):
OLD = {
'private': [
{
'addr': '192.168.0.3',
'version': 4,
},
]
}
NEW = {
'foo': [
{
'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:93:b3:01',
'version': 4,
'addr': '10.10.1.2',
'OS-EXT-IPS:type': 'fixed',
},
{
'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:93:b3:02',
'version': 6,
'addr': '0:0:0:0:0:ffff:a0a:103',
'OS-EXT-IPS:type': 'floating',
},
]
}
ODD = {'jenkins': ['10.3.3.18', '124.12.125.4']}
def test_get_ip_address(self):
self.assertEqual("192.168.0.3",
server._get_ip_address(self.OLD, 'private', [4, 6]))
self.assertEqual("10.10.1.2",
server._get_ip_address(self.NEW, 'fixed', [4, 6]))
self.assertEqual("10.10.1.2",
server._get_ip_address(self.NEW, 'private', [4, 6]))
self.assertEqual("0:0:0:0:0:ffff:a0a:103",
server._get_ip_address(self.NEW, 'public', [6]))
self.assertEqual("0:0:0:0:0:ffff:a0a:103",
server._get_ip_address(self.NEW, 'floating', [6]))
self.assertEqual("124.12.125.4",
server._get_ip_address(self.ODD, 'public', [4, 6]))
self.assertEqual("10.3.3.18",
server._get_ip_address(self.ODD, 'private', [4, 6]))
self.assertRaises(exceptions.CommandError,
server._get_ip_address, self.NEW, 'public', [4])
self.assertRaises(exceptions.CommandError,
server._get_ip_address, self.NEW, 'admin', [4])
self.assertRaises(exceptions.CommandError,
server._get_ip_address, self.OLD, 'public', [4, 6])
self.assertRaises(exceptions.CommandError,
server._get_ip_address, self.OLD, 'private', [6])
|
sburnett/seattle
|
refs/heads/master
|
network_semantics_tests/tests/sock.recv/sock_arg.py
|
1
|
# various tests for socketlikeobj argument semantics
# a dummy waitforconn callback to connect to
def nothing(rip,rport,sock,th,lh):
while mycontext['keep_testing']:
pass
sock.close()
# a timeout function
def fail_test(sock,handle,thing):
print 'ERROR: recv('+str(thing)+') blocked'
sock.close()
stopcomm(handle)
mycontext['keep_testing']=False
exitall()
if callfunc == 'initialize':
mycontext['keep_testing'] = True
ip = '127.0.0.1'
port = 12345
handle = waitforconn(ip,port,nothing)
sock = openconn(ip,port)
# test that an exception occurs when sending something that is not a string
for thing in [5,['hi','there']]:
try:
sock.send(thing)
except:
pass
else:
print 'sending an invalid obj: '+str(thing)+' did not cause exception'
# test that recv() raises when given a non-integer or non-positive size
for thing in ['hello',['hi','there'],3.14,-10,0]:
timer = settimer(1,fail_test,[sock,handle,thing])
try:
sock.recv(thing)
except:
canceltimer(timer)
sock.close()
mycontext['keep_testing'] = False
stopcomm(handle)
|
SMatsushi/gtest
|
refs/heads/master
|
test/gtest_uninitialized_test.py
|
2901
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
if not condition:
raise AssertionError
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
# Verifies that 'command' exits with code 1.
p = gtest_test_utils.Subprocess(command)
Assert(p.exited)
AssertEq(1, p.exit_code)
Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
|
mKeRix/home-assistant
|
refs/heads/dev
|
tests/components/adguard/__init__.py
|
30
|
"""Tests for the AdGuard Home component."""
|
poryfly/scikit-learn
|
refs/heads/master
|
sklearn/feature_selection/tests/test_variance_threshold.py
|
143
|
from sklearn.utils.testing import (assert_array_equal, assert_equal,
assert_raises)
from scipy.sparse import bsr_matrix, csc_matrix, csr_matrix
from sklearn.feature_selection import VarianceThreshold
data = [[0, 1, 2, 3, 4],
[0, 2, 2, 3, 5],
[1, 1, 2, 4, 0]]
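# Column 2 is constant (all 2s), so it is the only zero-variance feature;
# the remaining support indices are therefore [0, 1, 3, 4].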
def test_zero_variance():
# Test VarianceThreshold with default setting, zero variance.
for X in [data, csr_matrix(data), csc_matrix(data), bsr_matrix(data)]:
sel = VarianceThreshold().fit(X)
assert_array_equal([0, 1, 3, 4], sel.get_support(indices=True))
assert_raises(ValueError, VarianceThreshold().fit, [0, 1, 2, 3])
assert_raises(ValueError, VarianceThreshold().fit, [[0, 1], [0, 1]])
def test_variance_threshold():
# Test VarianceThreshold with custom variance.
for X in [data, csr_matrix(data)]:
X = VarianceThreshold(threshold=.4).fit_transform(X)
assert_equal((len(data), 1), X.shape)
|
w15971597/june
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
ojengwa/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/django/db/backends/postgresql_psycopg2/__init__.py
|
12133432
| |
nuagenetworks/tempest
|
refs/heads/master
|
tempest/services/image/__init__.py
|
12133432
| |
NamedGod/shadowsocks
|
refs/heads/master
|
tests/test_udp_src.py
|
1009
|
#!/usr/bin/python
import socket
import socks
SERVER_IP = '127.0.0.1'
SERVER_PORT = 1081
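# These tests assume a SOCKS5 proxy (e.g. a local shadowsocks client) is
# already listening on SERVER_IP:SERVER_PORT and relaying UDP packets.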
if __name__ == '__main__':
# Test 1: same source port IPv4
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9000))
sock_in1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('127.0.0.1', 9001))
sock_in2.bind(('127.0.0.1', 9002))
sock_out.sendto(b'data', ('127.0.0.1', 9001))
result1 = sock_in1.recvfrom(8)
sock_out.sendto(b'data', ('127.0.0.1', 9002))
result2 = sock_in2.recvfrom(8)
sock_out.close()
sock_in1.close()
sock_in2.close()
# make sure they're from the same source port
assert result1 == result2
# Test 2: same source port IPv6
# try again from the same port but IPv6
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9000))
sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in2 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('::1', 9001))
sock_in2.bind(('::1', 9002))
sock_out.sendto(b'data', ('::1', 9001))
result1 = sock_in1.recvfrom(8)
sock_out.sendto(b'data', ('::1', 9002))
result2 = sock_in2.recvfrom(8)
sock_out.close()
sock_in1.close()
sock_in2.close()
# make sure they're from the same source port
assert result1 == result2
# Test 3: different source ports IPv6
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9003))
sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('::1', 9001))
sock_out.sendto(b'data', ('::1', 9001))
result3 = sock_in1.recvfrom(8)
# make sure they're from different source ports
assert result1 != result3
sock_out.close()
sock_in1.close()
|
bigswitch/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/networks/ports/extensions/allowed_address_pairs/tables.py
|
7
|
# Copyright 2015, Alcatel-Lucent USA Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from openstack_dashboard import api
from openstack_dashboard import policy
from horizon import exceptions
from horizon import tables
LOG = logging.getLogger(__name__)
class AddAllowedAddressPair(policy.PolicyTargetMixin, tables.LinkAction):
name = "AddAllowedAddressPair"
verbose_name = _("Add Allowed Address Pair")
url = "horizon:project:networks:ports:addallowedaddresspairs"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "update_port"),)
def get_link_url(self, port=None):
if port:
return reverse(self.url, args=(port.id,))
else:
return reverse(self.url, args=(self.table.kwargs.get('port_id'),))
class DeleteAllowedAddressPair(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete",
u"Delete",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted address pair",
u"Deleted address pairs",
count
)
def delete(self, request, ip_address):
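        # Allowed address pairs live as a list attribute on the port, so
        # removing one pair means resending the filtered list in a port update.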
try:
port_id = self.table.kwargs['port_id']
port = api.neutron.port_get(request, port_id)
pairs = port.get('allowed_address_pairs', [])
pairs = [pair for pair in pairs
if pair['ip_address'] != ip_address]
pairs = [pair.to_dict() for pair in pairs]
api.neutron.port_update(request, port_id,
allowed_address_pairs=pairs)
except Exception as e:
LOG.error('Failed to update port %(port_id)s: %(reason)s',
{'port_id': port_id, 'reason': e})
redirect = reverse("horizon:project:networks:ports:detail",
args=(port_id,))
exceptions.handle(request, _('Failed to update port %s') % port_id,
redirect=redirect)
class AllowedAddressPairsTable(tables.DataTable):
IP = tables.Column("ip_address",
verbose_name=_("IP Address or CIDR"))
mac = tables.Column('mac_address', verbose_name=_("MAC Address"))
def get_object_display(self, address_pair):
return address_pair['ip_address']
class Meta(object):
name = "allowed_address_pairs"
verbose_name = _("Allowed Address Pairs")
row_actions = (DeleteAllowedAddressPair,)
table_actions = (AddAllowedAddressPair, DeleteAllowedAddressPair)
|
tcp813/mouTools
|
refs/heads/master
|
qt/treeview.py
|
1
|
from PyQt5.Qt import *
class Delegate(QStyledItemDelegate):
def __init__(self, parent=None):
QStyledItemDelegate.__init__(self, parent)
class Model(QAbstractItemModel):
    def __init__(self, parent=None):
        QAbstractItemModel.__init__(self, parent)
    def index(self, row, column, parent=QModelIndex()):
        # required override: QAbstractItemModel leaves index() pure virtual
        if parent.isValid():
            return QModelIndex()
        return self.createIndex(row, column)
    def parent(self, index):
        # required override: flat model, every item sits under the root
        return QModelIndex()
    def rowCount(self, index=QModelIndex()):
        # only the invisible root has children, otherwise the tree recurses
        return 0 if index.isValid() else 10
    def columnCount(self, index=QModelIndex()):
        return 3
    def data(self, index, role=Qt.DisplayRole):
        if role == Qt.DisplayRole:
            return 'xxxx'
        return None
class View(QTreeView):
def __init__(self, parent=None):
QTreeView.__init__(self, parent)
class Widget(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.resize(800,600)
layout = QHBoxLayout()
self.setLayout(layout)
self.view = View(self)
self.model = Model(self)
self.view.setModel(self.model)
self.delegate = Delegate(self)
self.view.setItemDelegate(self.delegate)
layout.addWidget(self.view)
if __name__ == '__main__':
app = QApplication([])
w = Widget()
w.show()
app.exec_()
|
sagarghuge/recurringtask
|
refs/heads/master
|
tests/tools/test_dates.py
|
2
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2014 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from unittest import TestCase
from datetime import date, timedelta
from GTG import _
from GTG.tools.dates import Date
def next_month(aday, day=None):
""" Increase month, change 2012-02-13 into 2012-03-13.
If day is set, replace day in month as well
@returns: updated date """
if day is None:
day = aday.day
if aday.month == 12:
return aday.replace(day=day, month=1, year=aday.year + 1)
else:
return aday.replace(day=day, month=aday.month + 1)
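# Illustrative behaviour of the helper above (a sketch, values assumed):
#   next_month(date(2012, 2, 13))        -> date(2012, 3, 13)
#   next_month(date(2012, 12, 5), day=1) -> date(2013, 1, 1)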
class TestDates(TestCase):
def test_parses_common_formats(self):
self.assertEqual(str(Date.parse("1985-03-29")), "1985-03-29")
self.assertEqual(str(Date.parse("19850329")), "1985-03-29")
self.assertEqual(str(Date.parse("1985/03/29")), "1985-03-29")
def test_parses_todays_month_day_format(self):
today = date.today()
parse_string = "%02d%02d" % (today.month, today.day)
self.assertEqual(Date.parse(parse_string), today)
def test_parses_today_as_today(self):
today = date.today()
self.assertEqual(Date(today), today)
def test_parse_fuzzy_dates(self):
""" Parse fuzzy dates like now, soon, later, someday """
self.assertEqual(Date.parse("now"), Date.now())
self.assertEqual(Date.parse("soon"), Date.soon())
self.assertEqual(Date.parse("later"), Date.someday())
self.assertEqual(Date.parse("someday"), Date.someday())
self.assertEqual(Date.parse(""), Date.no_date())
def test_parse_local_fuzzy_dates(self):
""" Parse fuzzy dates in their localized version """
self.assertEqual(Date.parse(_("now")), Date.now())
self.assertEqual(Date.parse(_("soon")), Date.soon())
self.assertEqual(Date.parse(_("later")), Date.someday())
self.assertEqual(Date.parse(_("someday")), Date.someday())
self.assertEqual(Date.parse(""), Date.no_date())
def test_parse_fuzzy_dates_str(self):
""" Print fuzzy dates in localized version """
self.assertEqual(str(Date.parse("now")), _("now"))
self.assertEqual(str(Date.parse("soon")), _("soon"))
self.assertEqual(str(Date.parse("later")), _("someday"))
self.assertEqual(str(Date.parse("someday")), _("someday"))
self.assertEqual(str(Date.parse("")), "")
def test_parse_week_days(self):
""" Parse name of week days and don't care about case-sensitivity """
weekday = date.today().weekday()
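        # The test expects a day name to parse to its next strictly-future
        # occurrence, so today's own name refers to one full week ahead.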
for i, day in enumerate(['Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday', 'Sunday']):
if i <= weekday:
expected = date.today() + timedelta(7 + i - weekday)
else:
expected = date.today() + timedelta(i - weekday)
self.assertEqual(Date.parse(day), expected)
self.assertEqual(Date.parse(day.lower()), expected)
self.assertEqual(Date.parse(day.upper()), expected)
# Test localized version
day = _(day)
self.assertEqual(Date.parse(day), expected)
self.assertEqual(Date.parse(day.lower()), expected)
self.assertEqual(Date.parse(day.upper()), expected)
def test_missing_year_this_year(self):
""" Parsing %m%d have to find correct date:
we enter a day this year """
aday = next_month(date.today(), day=1)
parse_string = "%02d%02d" % (aday.month, aday.day)
self.assertEqual(Date.parse(parse_string), aday)
def test_missing_year_next_year(self):
""" Parsing %m%d have to find correct date:
we enter a day the next year """
aday = date.today()
if aday.day == 1 and aday.month == 1:
# not possible to add a day next year
return
aday = aday.replace(year=aday.year + 1, month=1, day=1)
self.assertEqual(Date.parse("0101"), aday)
def test_on_certain_day(self):
""" Parse due:3 as 3rd day this month or next month
if it is already more or already 3rd day """
for i in range(28):
i += 1
aday = date.today()
if i <= aday.day:
aday = next_month(aday, i)
else:
aday = aday.replace(day=i)
self.assertEqual(Date.parse(str(i)), aday)
|
pavels/pootle
|
refs/heads/master
|
pootle/apps/staticpages/migrations/__init__.py
|
12133432
| |
Kazade/NeHe-Website
|
refs/heads/master
|
django/middleware/__init__.py
|
12133432
| |
ksiomelo/cubix
|
refs/heads/master
|
fca/compare_context.py
|
1
|
""" Comparing contexts """
def subseteq_table(table_left, table_right):
"""
Checks whether one binary relation of two sets (presented as bool table) is
subset or equals another. Tables should be given as lists of bool lists and
should have the same dimensions. Result is bool.
table_left ?\subseteq table_right
"""
if len(table_left) != len(table_right):
raise ValueError("Number of rows in left table (=%i) and number of rows"
"in right table (=%i) must agree" % (len(table_left),
len(table_right)))
elif (len(table_left) != 0) and len(table_left[0]) != len(table_right[0]):
raise ValueError("Number of columns in left table (=%i) and number of"
"columns in right table (=%i) must agree"
%(len(table_left[0]), len(table_right[0])))
row_length = len(table_left)
table_size = row_length * len(table_left[0])
for i in range(table_size):
a = (i / row_length)
b = (i % row_length)
if not (table_left[b][a] <= table_right[b][a]):
return False
return True
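# Illustrative usage (a minimal sketch, not part of the original module):
if __name__ == '__main__':
    LEFT = [[True, False],
            [False, False]]
    RIGHT = [[True, True],
             [False, True]]
    assert subseteq_table(LEFT, RIGHT)      # every pair in LEFT is in RIGHT
    assert not subseteq_table(RIGHT, LEFT)  # RIGHT has pairs LEFT lacks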
|
geekboxzone/lollipop_external_chromium_org
|
refs/heads/geekbox
|
tools/perf/benchmarks/tab_switching.py
|
34
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from measurements import tab_switching
import page_sets
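# benchmark.Enabled restricts each suite to platforms whose reported
# capabilities include the given label; these all require tab support.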
@benchmark.Enabled('has tabs')
class TabSwitchingTop10(benchmark.Benchmark):
test = tab_switching.TabSwitching
page_set = page_sets.Top10PageSet
@benchmark.Enabled('has tabs')
class TabSwitchingTypical25(benchmark.Benchmark):
test = tab_switching.TabSwitching
page_set = page_sets.Typical25PageSet
@benchmark.Enabled('has tabs')
class TabSwitchingFiveBlankTabs(benchmark.Benchmark):
test = tab_switching.TabSwitching
page_set = page_sets.FiveBlankPagesPageSet
options = {'pageset_repeat': 10}
@benchmark.Enabled('has tabs')
class TabSwitchingToughEnergyCases(benchmark.Benchmark):
test = tab_switching.TabSwitching
page_set = page_sets.ToughEnergyCasesPageSet
options = {'pageset_repeat': 10}
|